Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:13

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreSchedulerSMP
0007  *
0008  * @brief This header file provides interfaces of the
0009  *   @ref RTEMSScoreSchedulerSMP which are only used by the implementation.
0010  */
0011 
0012 /*
0013  * Copyright (C) 2013, 2021 embedded brains GmbH & Co. KG
0014  *
0015  * Redistribution and use in source and binary forms, with or without
0016  * modification, are permitted provided that the following conditions
0017  * are met:
0018  * 1. Redistributions of source code must retain the above copyright
0019  *    notice, this list of conditions and the following disclaimer.
0020  * 2. Redistributions in binary form must reproduce the above copyright
0021  *    notice, this list of conditions and the following disclaimer in the
0022  *    documentation and/or other materials provided with the distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0026  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0027  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0028  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0031  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0032  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0033  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0034  * POSSIBILITY OF SUCH DAMAGE.
0035  */
0036 
0037 #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
0038 #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
0039 
0040 #include <rtems/score/schedulersmp.h>
0041 #include <rtems/score/assert.h>
0042 #include <rtems/score/chainimpl.h>
0043 #include <rtems/score/schedulersimpleimpl.h>
0044 #include <rtems/bspIo.h>
0045 
0046 #ifdef __cplusplus
0047 extern "C" {
0048 #endif /* __cplusplus */
0049 
0050 /**
0051  * @addtogroup RTEMSScoreSchedulerSMP
0052  *
0053  * The scheduler nodes can be in three states
0054  * - @ref SCHEDULER_SMP_NODE_BLOCKED,
0055  * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
0056  * - @ref SCHEDULER_SMP_NODE_READY.
0057  *
0058  * State transitions are triggered via basic operations
0059  * - _Scheduler_SMP_Enqueue(),
0060  * - _Scheduler_SMP_Enqueue_scheduled(), and
0061  * - _Scheduler_SMP_Block().
0062  *
0063  * @dot
0064  * digraph {
0065  *   node [style="filled"];
0066  *
0067  *   bs [label="BLOCKED"];
0068  *   ss [label="SCHEDULED", fillcolor="green"];
0069  *   rs [label="READY", fillcolor="red"];
0070  *
0071  *   edge [label="enqueue"];
0072  *   edge [fontcolor="darkgreen", color="darkgreen"];
0073  *
0074  *   bs -> ss;
0075  *
0076  *   edge [fontcolor="red", color="red"];
0077  *
0078  *   bs -> rs;
0079  *
0080  *   edge [label="enqueue other"];
0081  *
0082  *   ss -> rs;
0083  *
0084  *   edge [label="block"];
0085  *   edge [fontcolor="black", color="black"];
0086  *
0087  *   ss -> bs;
0088  *   rs -> bs;
0089  *
0090  *   edge [label="block other"];
0091  *   edge [fontcolor="darkgreen", color="darkgreen"];
0092  *
0093  *   rs -> ss;
0094  * }
0095  * @enddot
0096  *
0097  * During system initialization each processor of the scheduler instance starts
0098  * with an idle thread assigned to it.  Let's have a look at an example with two
0099  * idle threads I and J with priority 5.  We also have blocked threads A, B and
0100  * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
0101  * with respect to the thread priority from left to right in the below
0102  * diagrams.  The highest priority node (lowest priority number) is the
0103  * leftmost node.  Since the processor assignment is independent of the thread
0104  * priority the processor indices may move from one state to the other.
0105  *
0106  * @dot
0107  * digraph {
0108  *   node [style="filled"];
0109  *   edge [dir="none"];
0110  *   subgraph {
0111  *     rank = same;
0112  *
0113  *     i [label="I (5)", fillcolor="green"];
0114  *     j [label="J (5)", fillcolor="green"];
0115  *     a [label="A (1)"];
0116  *     b [label="B (2)"];
0117  *     c [label="C (3)"];
0118  *     i -> j;
0119  *   }
0120  *
0121  *   subgraph {
0122  *     rank = same;
0123  *
0124  *     p0 [label="PROCESSOR 0", shape="box"];
0125  *     p1 [label="PROCESSOR 1", shape="box"];
0126  *   }
0127  *
0128  *   i -> p0;
0129  *   j -> p1;
0130  * }
0131  * @enddot
0132  *
0133  * Lets start A.  For this an enqueue operation is performed.
0134  *
0135  * @dot
0136  * digraph {
0137  *   node [style="filled"];
0138  *   edge [dir="none"];
0139  *
0140  *   subgraph {
0141  *     rank = same;
0142  *
0143  *     i [label="I (5)", fillcolor="green"];
0144  *     j [label="J (5)", fillcolor="red"];
0145  *     a [label="A (1)", fillcolor="green"];
0146  *     b [label="B (2)"];
0147  *     c [label="C (3)"];
0148  *     a -> i;
0149  *   }
0150  *
0151  *   subgraph {
0152  *     rank = same;
0153  *
0154  *     p0 [label="PROCESSOR 0", shape="box"];
0155  *     p1 [label="PROCESSOR 1", shape="box"];
0156  *   }
0157  *
0158  *   i -> p0;
0159  *   a -> p1;
0160  * }
0161  * @enddot
0162  *
0163  * Lets start C.
0164  *
0165  * @dot
0166  * digraph {
0167  *   node [style="filled"];
0168  *   edge [dir="none"];
0169  *
0170  *   subgraph {
0171  *     rank = same;
0172  *
0173  *     a [label="A (1)", fillcolor="green"];
0174  *     c [label="C (3)", fillcolor="green"];
0175  *     i [label="I (5)", fillcolor="red"];
0176  *     j [label="J (5)", fillcolor="red"];
0177  *     b [label="B (2)"];
0178  *     a -> c;
0179  *     i -> j;
0180  *   }
0181  *
0182  *   subgraph {
0183  *     rank = same;
0184  *
0185  *     p0 [label="PROCESSOR 0", shape="box"];
0186  *     p1 [label="PROCESSOR 1", shape="box"];
0187  *   }
0188  *
0189  *   c -> p0;
0190  *   a -> p1;
0191  * }
0192  * @enddot
0193  *
0194  * Lets start B.
0195  *
0196  * @dot
0197  * digraph {
0198  *   node [style="filled"];
0199  *   edge [dir="none"];
0200  *
0201  *   subgraph {
0202  *     rank = same;
0203  *
0204  *     a [label="A (1)", fillcolor="green"];
0205  *     b [label="B (2)", fillcolor="green"];
0206  *     c [label="C (3)", fillcolor="red"];
0207  *     i [label="I (5)", fillcolor="red"];
0208  *     j [label="J (5)", fillcolor="red"];
0209  *     a -> b;
0210  *     c -> i -> j;
0211  *   }
0212  *
0213  *   subgraph {
0214  *     rank = same;
0215  *
0216  *     p0 [label="PROCESSOR 0", shape="box"];
0217  *     p1 [label="PROCESSOR 1", shape="box"];
0218  *   }
0219  *
0220  *   b -> p0;
0221  *   a -> p1;
0222  * }
0223  * @enddot
0224  *
0225  * Lets change the priority of thread A to 4.
0226  *
0227  * @dot
0228  * digraph {
0229  *   node [style="filled"];
0230  *   edge [dir="none"];
0231  *
0232  *   subgraph {
0233  *     rank = same;
0234  *
0235  *     b [label="B (2)", fillcolor="green"];
0236  *     c [label="C (3)", fillcolor="green"];
0237  *     a [label="A (4)", fillcolor="red"];
0238  *     i [label="I (5)", fillcolor="red"];
0239  *     j [label="J (5)", fillcolor="red"];
0240  *     b -> c;
0241  *     a -> i -> j;
0242  *   }
0243  *
0244  *   subgraph {
0245  *     rank = same;
0246  *
0247  *     p0 [label="PROCESSOR 0", shape="box"];
0248  *     p1 [label="PROCESSOR 1", shape="box"];
0249  *   }
0250  *
0251  *   b -> p0;
0252  *   c -> p1;
0253  * }
0254  * @enddot
0255  *
0256  * Now perform a blocking operation with thread B.  Please note that thread A
0257  * migrated now from processor 0 to processor 1 and thread C still executes on
0258  * processor 1.
0259  *
0260  * @dot
0261  * digraph {
0262  *   node [style="filled"];
0263  *   edge [dir="none"];
0264  *
0265  *   subgraph {
0266  *     rank = same;
0267  *
0268  *     c [label="C (3)", fillcolor="green"];
0269  *     a [label="A (4)", fillcolor="green"];
0270  *     i [label="I (5)", fillcolor="red"];
0271  *     j [label="J (5)", fillcolor="red"];
0272  *     b [label="B (2)"];
0273  *     c -> a;
0274  *     i -> j;
0275  *   }
0276  *
0277  *   subgraph {
0278  *     rank = same;
0279  *
0280  *     p0 [label="PROCESSOR 0", shape="box"];
0281  *     p1 [label="PROCESSOR 1", shape="box"];
0282  *   }
0283  *
0284  *   a -> p0;
0285  *   c -> p1;
0286  * }
0287  * @enddot
0288  *
0289  * @{
0290  */
0291 
/** @brief Handler which checks whether the scheduler context has a ready node. */
typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

/**
 * @brief Handler which gets the highest ready node of the context.
 *
 * The @a filter node is excluded from consideration (semantics depend on the
 * concrete scheduler implementation).
 */
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

/** @brief Handler which gets the lowest ready node of the context. */
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
  Scheduler_Context *context
);

/**
 * @brief Handler which gets the lowest scheduled node of the context.
 *
 * The @a filter node may be used by implementations to exclude a node.
 */
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

/** @brief Handler which extracts @a node_to_extract from a node set. */
typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

/**
 * @brief Handler which inserts @a node_to_insert into a node set using the
 *   given insert priority.
 */
typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

/**
 * @brief Handler which moves @a node_to_move between the ready and scheduled
 *   node sets.
 */
typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

/** @brief Handler which processes an ask for help request of @a thread via @a node. */
typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

/** @brief Handler which updates @a node_to_update to the new priority. */
typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

/**
 * @brief Handler which sets the processor affinity of @a node.
 *
 * The @a arg parameter carries an implementation-defined affinity argument.
 */
typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

/**
 * @brief Handler which enqueues @a node_to_enqueue with the given priority.
 *
 * The boolean return value is implementation-defined (typically whether the
 * enqueued node needs help) -- see the concrete scheduler.
 */
typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

/** @brief Handler which enqueues the scheduled @a node_to_enqueue with the given priority. */
typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

/** @brief Handler which allocates the processor @a cpu to the @a scheduled node. */
typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
);

/** @brief Handler which registers the @a idle node for the processor @a cpu. */
typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);
0367 
0368 /**
0369  * @brief Does nothing.
0370  *
0371  * @param context This parameter is unused.
0372  * @param idle This parameter is unused.
0373  * @param cpu This parameter is unused.
0374  */
0375 static inline void _Scheduler_SMP_Do_nothing_register_idle(
0376   Scheduler_Context *context,
0377   Scheduler_Node    *idle,
0378   Per_CPU_Control   *cpu
0379 )
0380 {
0381   (void) context;
0382   (void) idle;
0383   (void) cpu;
0384 }
0385 
0386 /**
0387  * @brief Checks if @a to_insert is less or equal than the priority of the chain node.
0388  *
0389  * @param key is the priority to compare.
0390  *
0391  * @param to_insert is the chain node to insert.
0392  *
0393  * @param next is the chain node to compare the priority of.
0394  *
0395  * @retval true @a to_insert is less or equal than the priority of @a next.
0396  * @retval false @a to_insert is greater than the priority of @a next.
0397  */
0398 static inline bool _Scheduler_SMP_Priority_less_equal(
0399   const void       *key,
0400   const Chain_Node *to_insert,
0401   const Chain_Node *next
0402 )
0403 {
0404   const Priority_Control   *priority_to_insert;
0405   const Scheduler_SMP_Node *node_next;
0406 
0407   (void) to_insert;
0408   priority_to_insert = (const Priority_Control *) key;
0409   node_next = (const Scheduler_SMP_Node *) next;
0410 
0411   return *priority_to_insert <= node_next->priority;
0412 }
0413 
0414 /**
0415  * @brief Gets the scheduler smp context.
0416  *
0417  * @param context The context to cast to Scheduler_SMP_Context *.
0418  *
0419  * @return @a context cast to Scheduler_SMP_Context *.
0420  */
0421 static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
0422   Scheduler_Context *context
0423 )
0424 {
0425   return (Scheduler_SMP_Context *) context;
0426 }
0427 
/**
 * @brief Initializes the scheduler smp context.
 *
 * @param[out] self The context to initialize.
 */
static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  /* Initially no nodes are scheduled */
  _Chain_Initialize_empty( &self->Scheduled );
}
0439 
0440 /**
0441  * @brief Gets the scheduler smp node of the thread.
0442  *
0443  * @param thread The thread to get the smp node of.
0444  *
0445  * @return The scheduler smp node of @a thread.
0446  */
0447 static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
0448   Thread_Control *thread
0449 )
0450 {
0451   return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
0452 }
0453 
0454 /**
0455  * @brief Gets the scheduler smp node of the thread.
0456  *
0457  * @param thread The thread to get the smp node of.
0458  *
0459  * @return The scheduler smp node of @a thread.
0460  */
0461 static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
0462   Thread_Control *thread
0463 )
0464 {
0465   return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
0466 }
0467 
0468 /**
0469  * @brief Gets the scheduler smp node.
0470  *
0471  * @param node The node to cast to Scheduler_SMP_Node *.
0472  *
0473  * @return @a node cast to Scheduler_SMP_Node *.
0474  */
0475 static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
0476   Scheduler_Node *node
0477 )
0478 {
0479   return (Scheduler_SMP_Node *) node;
0480 }
0481 
0482 /**
0483  * @brief Gets the state of the node.
0484  *
0485  * @param node The node to get the state of.
0486  *
0487  * @return The state of @a node.
0488  */
0489 static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
0490   const Scheduler_Node *node
0491 )
0492 {
0493   return ( (const Scheduler_SMP_Node *) node )->state;
0494 }
0495 
0496 /**
0497  * @brief Gets the priority of the node.
0498  *
0499  * @param node The node to get the priority of.
0500  *
0501  * @return The priority of @a node.
0502  */
0503 static inline Priority_Control _Scheduler_SMP_Node_priority(
0504   const Scheduler_Node *node
0505 )
0506 {
0507   return ( (const Scheduler_SMP_Node *) node )->priority;
0508 }
0509 
0510 /**
0511  * @brief Initializes the scheduler smp node.
0512  *
0513  * @param scheduler The scheduler instance.
0514  * @param[out] node The node to initialize.
0515  * @param thread The thread of the scheduler smp node.
0516  * @param priority The priority to initialize @a node with.
0517  */
0518 static inline void _Scheduler_SMP_Node_initialize(
0519   const Scheduler_Control *scheduler,
0520   Scheduler_SMP_Node      *node,
0521   Thread_Control          *thread,
0522   Priority_Control         priority
0523 )
0524 {
0525   _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
0526   node->state = SCHEDULER_SMP_NODE_BLOCKED;
0527   node->priority = priority;
0528 }
0529 
/**
 * @brief Updates the priority of the node to the new priority.
 *
 * Only the cached priority member is updated; the node is not repositioned in
 * any node set by this function.
 *
 * @param[out] node The node to update the priority of.
 * @param new_priority The new priority for @a node.
 */
static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}
0543 
0544 /**
0545  * @brief Changes the state of the node to the given state.
0546  *
0547  * @param[out] node the node to change the state of.
0548  * @param new_state The new state for @a node.
0549  */
0550 static inline void _Scheduler_SMP_Node_change_state(
0551   Scheduler_Node           *node,
0552   Scheduler_SMP_Node_state  new_state
0553 )
0554 {
0555   Scheduler_SMP_Node *the_node;
0556 
0557   the_node = _Scheduler_SMP_Node_downcast( node );
0558   the_node->state = new_state;
0559 }
0560 
0561 /**
0562  * @brief Checks if the processor is owned by the given context.
0563  *
0564  * @param context The context to check whether @a cpu is owned by it.
0565  * @param cpu The cpu to check whether it is owned by @a context.
0566  *
0567  * @retval true @a cpu is owned by @a context.
0568  * @retval false @a cpu is not owned by @a context.
0569  */
0570 static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
0571   const Scheduler_Context *context,
0572   const Per_CPU_Control   *cpu
0573 )
0574 {
0575   return cpu->Scheduler.context == context;
0576 }
0577 
/**
 * @brief Removes the thread's ask for help request from the processor.
 *
 * The caller must be the owner of the thread's scheduler lock.
 *
 * This is a function declaration only; the implementation is provided
 * elsewhere (it is not a static inline function like the rest of this
 * header).
 *
 * @param[in, out] thread is the thread of the ask for help request.
 *
 * @param[in, out] cpu is the processor from which the ask for help request
 *   should be removed.
 */
void _Scheduler_SMP_Remove_ask_for_help_from_processor(
  Thread_Control  *thread,
  Per_CPU_Control *cpu
);
0592 
0593 /**
0594  * @brief Cancels the thread's ask for help request.
0595  *
0596  * The caller must be the owner of the thread's scheduler lock.
0597  *
0598  * @param[in, out] thread is the thread of the ask help request.
0599  */
0600 static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
0601 {
0602   Per_CPU_Control *cpu;
0603 
0604   _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
0605   cpu = thread->Scheduler.ask_for_help_cpu;
0606 
0607   if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
0608     _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
0609   }
0610 }
0611 
/**
 * @brief Requests to ask for help for the thread.
 *
 * The actual ask for help operations are carried out during
 * _Thread_Do_dispatch() on the current processor.
 *
 * An alternative approach would be to carry out the requests on a processor
 * related to the thread.  This could reduce the overhead for the preempting
 * thread a bit, however, there are at least two problems with this approach.
 * Firstly, we have to figure out what is a processor related to the thread.
 * Secondly, we may need an inter-processor interrupt.
 *
 * @param[in, out] thread is the thread in need for help.
 */
static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  cpu_self = _Per_CPU_Get();

  /* A thread may have at most one pending ask for help request */
  _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
  thread->Scheduler.ask_for_help_cpu = cpu_self;
  /* Force a dispatch on this processor so that the request is processed */
  cpu_self->dispatch_necessary = true;

  /* Append the thread to the per-processor list of threads in need for help
     while holding the per-processor lock */
  _Per_CPU_Acquire( cpu_self, &lock_context );
  _Chain_Append_unprotected(
    &cpu_self->Threads_in_need_for_help,
    &thread->Scheduler.Help_node
  );
  _Per_CPU_Release( cpu_self, &lock_context );
}
0644 
/**
 * @brief This enumeration defines what a scheduler should do with a node which
 * could be scheduled.
 */
typedef enum {
  /** @brief The node shall be scheduled. */
  SCHEDULER_SMP_DO_SCHEDULE,
  /** @brief The node shall not be scheduled, it was blocked. */
  SCHEDULER_SMP_DO_NOT_SCHEDULE
} Scheduler_SMP_Action;
0653 
/**
 * @brief Tries to schedule the scheduler node.
 *
 * When an SMP scheduler needs to schedule a node, it shall use this function
 * to determine what it shall do with the node.
 *
 * This function uses the state of the node and the scheduler state of the
 * owner thread to determine what shall be done.  Each scheduler maintains its
 * nodes independent of other schedulers.  This function ensures that a thread
 * is scheduled by at most one scheduler.  If a node requires an executing
 * thread due to some locking protocol and the owner thread is already
 * scheduled by another scheduler, then an idle thread will be attached to the
 * node.
 *
 * @param[in, out] node is the node which should be scheduled.
 *
 * @param get_idle_node is the get idle node handler.
 *
 * @param arg is the get idle node handler argument.
 *
 * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
 *
 * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall be blocked.  This
 *   action is returned, if the owner thread is already scheduled by another
 *   scheduler.
 */
static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
  Scheduler_Node          *node,
  Scheduler_Get_idle_node  get_idle_node,
  void                    *arg
)
{
  ISR_lock_Context        lock_context;
  Thread_Control         *owner;
  Thread_Scheduler_state  owner_state;
  int                     owner_sticky_level;

  owner = _Scheduler_Node_get_owner( node );
  /* The node must not have an idle thread attached yet */
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  /* Sample the owner state and sticky level under the scheduler lock */
  _Thread_Scheduler_acquire_critical( owner, &lock_context );
  owner_state = owner->Scheduler.state;
  owner_sticky_level = node->sticky_level;

  if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
    /* Common case: the ready owner becomes scheduled by this scheduler; any
       pending ask for help request is now obsolete */
    _Scheduler_SMP_Cancel_ask_for_help( owner );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( owner, &lock_context );
    return SCHEDULER_SMP_DO_SCHEDULE;
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );

  if (
    ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
    owner_sticky_level == 0
  ) {
    /* The owner is already scheduled by another scheduler and the node does
       not need to stay scheduled (non-sticky): block the node */
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    return SCHEDULER_SMP_DO_NOT_SCHEDULE;
  }

  /* Sticky node whose owner cannot execute here: attach an idle thread */
  (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );

  return SCHEDULER_SMP_DO_SCHEDULE;
}
0720 
/**
 * @brief Allocates a processor to the user of the scheduled node.
 *
 * Attempts to prevent migrations but does not take into account affinity.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
 *
 * @param[in, out] cpu is the processor to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  _Assert( _ISR_Get_level() != 0 );

  if ( cpu == scheduled_cpu ) {
    /* The thread is already assigned to the target processor: no migration */
    _Thread_Set_CPU( scheduled_thread, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );

    return;
  }

  if (
    _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
    _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
  ) {
    /* The thread executes on a processor we own: keep it there and move the
       previous heir of that processor to the target processor instead */
    Thread_Control *heir = scheduled_cpu->heir;
    _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
    _Thread_Set_CPU( heir, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, heir );

    return;
  }

  /* Fall back to a plain migration to the target processor */
  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}
0766 
0767 /**
0768  * @brief Allocates exactly the processor to the user of the scheduled node.
0769  *
0770  * This method is slightly different from
0771  * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
0772  * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
0773  * but does not take into account affinity.
0774  *
0775  * @param[in, out] context is the scheduler context.
0776  *
0777  * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
0778  *
0779  * @param[in, out] cpu is the processor to allocate.
0780  */
0781 static inline void _Scheduler_SMP_Allocate_processor_exact(
0782   Scheduler_Context *context,
0783   Scheduler_Node    *scheduled,
0784   Per_CPU_Control   *cpu
0785 )
0786 {
0787   Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
0788   Per_CPU_Control *cpu_self = _Per_CPU_Get();
0789 
0790   (void) context;
0791 
0792   _Thread_Set_CPU( scheduled_thread, cpu );
0793   _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
0794 }
0795 
/**
 * @brief Allocates the processor to the user of the scheduled node using the
 *   given allocation handler.
 *
 * The node is moved to the scheduled state before the handler is called.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
 *
 * @param[in, out] cpu is the processor to allocate.
 *
 * @param allocate_processor is the handler which should allocate the processor.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Per_CPU_Control                  *cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, cpu );
}
0818 
/**
 * @brief Preempts the victim's thread and allocates a processor for the user
 *   of the scheduled node.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the node of the user thread that is about to
 *   get a processor allocated.
 *
 * @param[in, out] victim is the victim node of the thread to preempt.
 *
 * @param[in, out] victim_idle is the idle thread used by the victim node or NULL.
 *
 * @param allocate_processor The function for allocation of a processor for the new thread.
 */
static inline void _Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Thread_Control                   *victim_idle,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_owner;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu;

  /* The victim node loses its processor and becomes ready */
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  victim_owner = _Scheduler_Node_get_owner( victim );
  _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );

  if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
    /* The victim node is used by its owner thread */
    if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
      _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );

      if ( victim_owner->Scheduler.helping_nodes > 0 ) {
        /* Other scheduler nodes may be able to help the preempted owner */
        _Scheduler_SMP_Request_ask_for_help( victim_owner );
      }
    }

    cpu = _Thread_Get_CPU( victim_owner );
  } else {
    /* The victim node is used by an idle thread: take its processor */
    cpu = _Thread_Get_CPU( victim_idle );
  }

  _Thread_Scheduler_release_critical( victim_owner, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    cpu,
    allocate_processor
  );
}
0874 
0875 /**
0876  * @brief Returns the lowest member of the scheduled nodes.
0877  *
0878  * @param context The scheduler context instance.
0879  * @param filter This parameter is unused.
0880  *
0881  * @return The lowest scheduled node.
0882  */
0883 static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
0884   Scheduler_Context *context,
0885   Scheduler_Node    *filter
0886 )
0887 {
0888   Scheduler_SMP_Context *self;
0889   Scheduler_Node        *lowest_scheduled;
0890 
0891   (void) filter;
0892 
0893   self = _Scheduler_SMP_Get_self( context );
0894 
0895   _Assert( !_Chain_Is_empty( &self->Scheduled ) );
0896   lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );
0897 
0898   _Assert(
0899     _Chain_Next( &lowest_scheduled->Node.Chain ) ==
0900       _Chain_Tail( &self->Scheduled )
0901   );
0902 
0903   return lowest_scheduled;
0904 }
0905 
/**
 * @brief Tries to schedule the given node.
 *
 * Schedules the node, or blocks if that is necessary.
 *
 * @param context The scheduler context instance.
 * @param[in, out] node The node to insert into the scheduled nodes.
 * @param priority The priority of @a node.
 * @param[in, out] lowest_scheduled The lowest member of the scheduled nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param get_idle_node Handler to get an idle node.
 * @param release_idle_node Handler to release an idle node.
 */
static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control      *lowest_scheduled_idle;
  Scheduler_SMP_Action action;

  /* Detach a potential idle thread from the node which loses its processor */
  lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
    lowest_scheduled,
    release_idle_node,
    context
  );

  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

  action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );

  if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
    /* The new node takes over the processor of the lowest scheduled node */
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      lowest_scheduled_idle,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
  } else {
    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );

    /* The new node was blocked: undo the changes to the lowest scheduled
       node, re-attaching an idle thread if one was released above */
    if ( lowest_scheduled_idle != NULL ) {
      (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
    }

    ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
  }
}
0968 
0969 /**
0970  * @brief Enqueues a node according to the specified order function.
0971  *
0972  * The node must not be in the scheduled state.
0973  *
0974  * @param context The scheduler instance context.
0975  * @param[in, out] node The node to enqueue.
0976  * @param priority The node insert priority.
0977  * @param order The order function.
0978  * @param insert_ready Function to insert a node into the set of ready
0979  *   nodes.
0980  * @param insert_scheduled Function to insert a node into the set of
0981  *   scheduled nodes.
0982  * @param move_from_scheduled_to_ready Function to move a node from the set
0983  *   of scheduled nodes to the set of ready nodes.
0984  * @param get_lowest_scheduled Function to select the node from the
0985  *   scheduled nodes to replace.  It may not be possible to find one, in this
0986  *   case a pointer must be returned so that the order functions returns false
0987  *   if this pointer is passed as the second argument to the order function.
0988  * @param allocate_processor Function to allocate a processor to a node
0989  *   based on the rules of the scheduler.
0990  */
0991 static inline bool _Scheduler_SMP_Enqueue(
0992   Scheduler_Context                  *context,
0993   Scheduler_Node                     *node,
0994   Priority_Control                    insert_priority,
0995   Chain_Node_order                    order,
0996   Scheduler_SMP_Insert                insert_ready,
0997   Scheduler_SMP_Insert                insert_scheduled,
0998   Scheduler_SMP_Move                  move_from_scheduled_to_ready,
0999   Scheduler_SMP_Move                  move_from_ready_to_scheduled,
1000   Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1001   Scheduler_SMP_Allocate_processor    allocate_processor,
1002   Scheduler_Get_idle_node             get_idle_node,
1003   Scheduler_Release_idle_node         release_idle_node
1004 )
1005 {
1006   bool            needs_help;
1007   Scheduler_Node *lowest_scheduled;
1008 
1009   lowest_scheduled = ( *get_lowest_scheduled )( context, node );
1010 
1011   if (
1012     ( *order )(
1013       &insert_priority,
1014       &node->Node.Chain,
1015       &lowest_scheduled->Node.Chain
1016     )
1017   ) {
1018     _Scheduler_SMP_Enqueue_to_scheduled(
1019       context,
1020       node,
1021       insert_priority,
1022       lowest_scheduled,
1023       insert_scheduled,
1024       move_from_scheduled_to_ready,
1025       move_from_ready_to_scheduled,
1026       allocate_processor,
1027       get_idle_node,
1028       release_idle_node
1029     );
1030     needs_help = false;
1031   } else {
1032     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1033     ( *insert_ready )( context, node, insert_priority );
1034     needs_help = true;
1035   }
1036 
1037   return needs_help;
1038 }
1039 
/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * The node has already been extracted from the scheduled chain by the
 * caller.  It is either reinserted into the set of scheduled nodes, or it
 * is replaced by the highest ready node and inserted into the set of ready
 * nodes.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node to enqueue.
 * @param insert_priority The node insert priority.
 * @param order The order function.
 * @param extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node of the scheduler.
 * @param release_idle_node Function to release an idle node of the
 *   scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control *node_idle;

  /* If an idle thread was bound to the node, release it up front */
  node_idle = _Scheduler_Release_idle_thread_if_necessary(
    node,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node       *highest_ready;
    Scheduler_SMP_Action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0 && ( *order )(
        &insert_priority,
        &node->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      /* The node stays scheduled; it wins against the highest ready node */
      if ( node_idle != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          Per_CPU_Control *cpu;

          /* The owner takes over the processor used by the idle thread */
          _Scheduler_SMP_Cancel_ask_for_help( owner );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
          cpu = _Thread_Get_CPU( node_idle );
          _Thread_Set_CPU( owner, cpu );
          _Thread_Scheduler_release_critical( owner, &lock_context );
          _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
        } else {
          Thread_Control *new_idle;

          /* The owner is unavailable; rebind an idle thread to the node */
          _Thread_Scheduler_release_critical( owner, &lock_context );
          new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
          _Assert_Unused_variable_equals( new_idle, node_idle );
        }
      }

      ( *insert_scheduled )( context, node, insert_priority );

      return;
    }

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      /* The highest ready node takes over; the node becomes ready */
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        node_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      ( *insert_ready )( context, node, insert_priority );
      return;
    }

    /* The candidate could not be scheduled; drop it and retry */
    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}
1153 
1154 /**
1155  * @brief Extracts a scheduled node from the scheduled nodes.
1156  *
1157  * @param context This parameter is unused.
1158  * @param node The node to extract from the chain it belongs to.
1159  */
1160 static inline void _Scheduler_SMP_Extract_from_scheduled(
1161   Scheduler_Context *context,
1162   Scheduler_Node    *node
1163 )
1164 {
1165   (void) context;
1166   _Chain_Extract_unprotected( &node->Node.Chain );
1167 }
1168 
1169 /**
1170  * @brief Schedules the highest ready node.
1171  *
1172  * @param context The scheduler context instance.
1173  * @param victim The node of the thread that is repressed by the newly scheduled thread.
1174  * @param cpu is the processor to allocate.
1175  * @param extract_from_scheduled Function to extract a node from the set of
1176  *      scheduled nodes.
1177  * @param extract_from_ready Function to extract a node from the set of
1178  *      ready nodes.
1179  * @param get_highest_ready Function to get the highest ready node.
1180  * @param move_from_ready_to_scheduled Function to move a node from the set
1181  *      of ready nodes to the set of scheduled nodes.
1182  * @param allocate_processor Function to allocate a processor to a node
1183  *      based on the rules of the scheduler.
1184  */
1185 static inline void _Scheduler_SMP_Schedule_highest_ready(
1186   Scheduler_Context                *context,
1187   Scheduler_Node                   *victim,
1188   Per_CPU_Control                  *cpu,
1189   Scheduler_SMP_Extract             extract_from_scheduled,
1190   Scheduler_SMP_Extract             extract_from_ready,
1191   Scheduler_SMP_Get_highest_ready   get_highest_ready,
1192   Scheduler_SMP_Move                move_from_ready_to_scheduled,
1193   Scheduler_SMP_Allocate_processor  allocate_processor,
1194   Scheduler_Get_idle_node           get_idle_node
1195 )
1196 {
1197   Scheduler_SMP_Action action;
1198 
1199   _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
1200   ( *extract_from_scheduled )( context, victim );
1201 
1202   while ( true ) {
1203     Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1204 
1205     action = _Scheduler_SMP_Try_to_schedule(
1206       highest_ready,
1207       get_idle_node,
1208       context
1209     );
1210 
1211     if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
1212       _Scheduler_SMP_Allocate_processor(
1213         context,
1214         highest_ready,
1215         cpu,
1216         allocate_processor
1217       );
1218 
1219       ( *move_from_ready_to_scheduled )( context, highest_ready );
1220       return;
1221     }
1222 
1223     _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
1224     ( *extract_from_ready )( context, highest_ready );
1225   }
1226 }
1227 
1228 /**
1229  * @brief Schedules the highest ready node and preempts a currently executing one.
1230  *
1231  * @param context The scheduler context instance.
1232  * @param victim The node of the thread that is repressed by the newly scheduled thread.
1233  * @param extract_from_ready Function to extract a node from the set of
1234  *      ready nodes.
1235  * @param get_highest_ready Function to get the highest ready node.
1236  * @param move_from_ready_to_scheduled Function to move a node from the set
1237  *      of ready nodes to the set of scheduled nodes.
1238  * @param allocate_processor Function to allocate a processor to a node
1239  *      based on the rules of the scheduler.
1240  */
1241 static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1242   Scheduler_Context                *context,
1243   Scheduler_Node                   *victim,
1244   Scheduler_SMP_Extract             extract_from_ready,
1245   Scheduler_SMP_Get_highest_ready   get_highest_ready,
1246   Scheduler_SMP_Move                move_from_ready_to_scheduled,
1247   Scheduler_SMP_Allocate_processor  allocate_processor,
1248   Scheduler_Get_idle_node           get_idle_node,
1249   Scheduler_Release_idle_node       release_idle_node
1250 )
1251 {
1252   Thread_Control      *victim_idle;
1253   Scheduler_SMP_Action action;
1254 
1255   _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
1256   victim_idle = _Scheduler_Release_idle_thread_if_necessary(
1257     victim,
1258     release_idle_node,
1259     context
1260   );
1261 
1262   while ( true ) {
1263     Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1264 
1265     action = _Scheduler_SMP_Try_to_schedule(
1266       highest_ready,
1267       get_idle_node,
1268       context
1269     );
1270 
1271     if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
1272       _Scheduler_SMP_Preempt(
1273         context,
1274         highest_ready,
1275         victim,
1276         victim_idle,
1277         allocate_processor
1278       );
1279 
1280       ( *move_from_ready_to_scheduled )( context, highest_ready );
1281       return;
1282     }
1283 
1284     _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
1285     ( *extract_from_ready )( context, highest_ready );
1286   }
1287 }
1288 
/**
 * @brief Blocks the thread.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread of the scheduling operation.
 * @param[in, out] node The scheduler node of the thread to block.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the set of
 *      ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  int                       sticky_level;
  ISR_lock_Context          lock_context;
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *cpu;

  /* A blocked thread no longer contributes to the sticky level */
  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  _Scheduler_SMP_Cancel_ask_for_help( thread );
  /* Remember the processor before the thread state changes */
  cpu = _Thread_Get_CPU( thread );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
    /*
     * The node stays in its set due to the remaining sticky level.  If it is
     * scheduled and has no idle thread yet, an idle thread takes over the
     * processor of the blocked thread.
     */
    if (
      node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
      _Scheduler_Node_get_idle( node ) == NULL
    ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
      _Thread_Set_CPU( idle, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
    }

    return;
  }

  _Assert( _Scheduler_Node_get_user( node ) == thread );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    /* The freed processor goes to the highest ready node */
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }
}
1370 
/**
 * @brief Unblocks the thread.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread of the scheduling operation.
 * @param[in, out] node The scheduler node of the thread to unblock.
 * @param update Function to update the node's priority to the new value.
 * @param enqueue Function to insert a node with a priority in the ready queue
 *      of a context.
 * @param release_idle_node Function to release an idle node of the
 *      scheduler.
 */
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_Release_idle_node release_idle_node
)
{
  Scheduler_SMP_Node_state  node_state;
  Priority_Control          priority;

  _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );

  /* An unblocked thread contributes to the sticky level again */
  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
    /*
     * The node kept its processor while the thread was blocked (sticky
     * level); the thread simply takes over from the idle thread.
     */
    _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Discard_idle_thread(
      thread,
      node,
      release_idle_node,
      context
    );

    return;
  }

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );

  priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( priority );

  /* The priority may have changed while the thread was blocked */
  if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
    ( *update )( context, node, priority );
  }

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Priority_Control insert_priority;
    bool             needs_help;

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    needs_help = ( *enqueue )( context, node, insert_priority );

    /* Only ask for help if other scheduler nodes could provide it */
    if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
      _Scheduler_SMP_Request_ask_for_help( thread );
    }
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_READY );
    _Assert( node->sticky_level > 0 );
    _Assert( node->idle == NULL );
    _Scheduler_SMP_Request_ask_for_help( thread );
  }
}
1438 
1439 /**
1440  * @brief Updates the priority of the node and the position in the queues it
1441  * is in.
1442  *
1443  * This function firstly updates the priority of the node and then extracts
1444  * and reinserts it into the queue the node is part of using the given
1445  * functions.
1446  *
1447  * @param context The scheduler instance context.
1448  * @param thread The thread for the operation.
1449  * @param[in, out] node The node to update the priority of.
1450  * @param extract_from_scheduled Function to extract a node from the set of
1451  *      scheduled nodes.
1452  * @param extract_from_ready Function to extract a node from the ready
1453  *      queue of the scheduler context.
1454  * @param update Function to update the priority of a node in the scheduler
1455  *      context.
1456  * @param enqueue Function to enqueue a node with a given priority.
1457  * @param enqueue_scheduled Function to enqueue a scheduled node.
1458  * @param ask_for_help Function to perform a help request.
1459  */
1460 static inline void _Scheduler_SMP_Update_priority(
1461   Scheduler_Context              *context,
1462   Thread_Control                 *thread,
1463   Scheduler_Node                 *node,
1464   Scheduler_SMP_Extract           extract_from_scheduled,
1465   Scheduler_SMP_Extract           extract_from_ready,
1466   Scheduler_SMP_Update            update,
1467   Scheduler_SMP_Enqueue           enqueue,
1468   Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
1469   Scheduler_SMP_Ask_for_help      ask_for_help
1470 )
1471 {
1472   Priority_Control         priority;
1473   Priority_Control         insert_priority;
1474   Scheduler_SMP_Node_state node_state;
1475 
1476   insert_priority = _Scheduler_Node_get_priority( node );
1477   priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
1478 
1479   if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
1480     if ( _Thread_Is_ready( thread ) ) {
1481       ( *ask_for_help )( context, thread, node );
1482     }
1483 
1484     return;
1485   }
1486 
1487   node_state = _Scheduler_SMP_Node_state( node );
1488 
1489   if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1490     ( *extract_from_scheduled )( context, node );
1491     ( *update )( context, node, priority );
1492     ( *enqueue_scheduled )( context, node, insert_priority );
1493   } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1494     ( *extract_from_ready )( context, node );
1495     ( *update )( context, node, priority );
1496     ( *enqueue )( context, node, insert_priority );
1497   } else {
1498     ( *update )( context, node, priority );
1499 
1500     if ( _Thread_Is_ready( thread ) ) {
1501       ( *ask_for_help )( context, thread, node );
1502     }
1503   }
1504 }
1505 
1506 /**
1507  * @brief Performs a yield and asks for help if necessary.
1508  *
1509  * @param context The scheduler instance context.
1510  * @param thread The thread for the operation.
1511  * @param node The node of the thread that yields.
1512  * @param extract_from_scheduled Function to extract a node from the set of
1513  *      scheduled nodes.
1514  * @param extract_from_ready Function to extract a node from the ready
1515  *      queue of the scheduler context.
1516  * @param enqueue Function to enqueue a node with a given priority.
1517  * @param enqueue_scheduled Function to enqueue a scheduled node.
1518  */
1519 static inline void _Scheduler_SMP_Yield(
1520   Scheduler_Context              *context,
1521   Thread_Control                 *thread,
1522   Scheduler_Node                 *node,
1523   Scheduler_SMP_Extract           extract_from_scheduled,
1524   Scheduler_SMP_Extract           extract_from_ready,
1525   Scheduler_SMP_Enqueue           enqueue,
1526   Scheduler_SMP_Enqueue_scheduled enqueue_scheduled
1527 )
1528 {
1529   Scheduler_SMP_Node_state node_state;
1530   Priority_Control         insert_priority;
1531 
1532   node_state = _Scheduler_SMP_Node_state( node );
1533   insert_priority = _Scheduler_SMP_Node_priority( node );
1534   insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1535 
1536   if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1537     ( *extract_from_scheduled )( context, node );
1538     ( *enqueue_scheduled )( context, node, insert_priority );
1539   } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1540     ( *extract_from_ready )( context, node );
1541     (void) ( *enqueue )( context, node, insert_priority );
1542   }
1543 }
1544 
1545 /**
1546  * @brief Inserts the node with the given priority into the scheduled nodes.
1547  *
1548  * @param context The scheduler instance context.
1549  * @param node_to_insert The scheduled node to insert.
1550  * @param priority_to_insert The priority with which to insert the node.
1551  */
1552 static inline void _Scheduler_SMP_Insert_scheduled(
1553   Scheduler_Context *context,
1554   Scheduler_Node    *node_to_insert,
1555   Priority_Control   priority_to_insert
1556 )
1557 {
1558   Scheduler_SMP_Context *self;
1559 
1560   self = _Scheduler_SMP_Get_self( context );
1561 
1562   _Chain_Insert_ordered_unprotected(
1563     &self->Scheduled,
1564     &node_to_insert->Node.Chain,
1565     &priority_to_insert,
1566     _Scheduler_SMP_Priority_less_equal
1567   );
1568 }
1569 
/**
 * @brief Asks for help.
 *
 * @param context The scheduler instance context.
 * @param thread The thread that asks for help.
 * @param[in, out] node The node of the thread that performs the ask for help
 *      operation.
 * @param order The order function.
 * @param insert_ready Function to insert a node into the set of ready
 *      nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *      scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *      of scheduled nodes to the set of ready nodes.
 * @param get_lowest_scheduled Function to select the node from the
 *      scheduled nodes to replace.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param release_idle_node Function to release an idle node of the
 *      scheduler.
 *
 * @retval true The ask for help operation was successful.
 * @retval false The ask for help operation was not successful.
 */
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Release_idle_node         release_idle_node
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to break
     * the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  /* Only a thread which is still ready can be helped */
  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if (
        ( *order )(
          &insert_priority,
          &node->Node.Chain,
          &lowest_scheduled->Node.Chain
        )
      ) {
        Thread_Control *lowest_scheduled_idle;

        /*
         * The node wins against the lowest scheduled node: the thread gets
         * the processor.  The state change must happen while the lock is
         * held; the preemption itself happens afterwards.
         */
        _Scheduler_SMP_Cancel_ask_for_help( thread );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
          lowest_scheduled,
          release_idle_node,
          context
        );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          lowest_scheduled_idle,
          allocate_processor
        );

        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
        ( *insert_scheduled )( context, node, insert_priority );

        success = true;
      } else {
        /* The node loses; it becomes ready and keeps waiting for help */
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      /* The node is already scheduled; the thread takes over from the idle */
      _Scheduler_SMP_Cancel_ask_for_help( thread );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      _Scheduler_Discard_idle_thread(
        thread,
        node,
        release_idle_node,
        context
      );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}
1691 
1692 /**
1693  * @brief Reconsiders help request.
1694  *
1695  * @param context The scheduler context instance.
1696  * @param thread The thread to reconsider the help request of.
1697  * @param[in, out] node The scheduler node of @a thread.
1698  * @param extract_from_ready Function to extract a node from the ready queue
1699  *      of the scheduler context.
1700  */
1701 static inline void _Scheduler_SMP_Reconsider_help_request(
1702   Scheduler_Context     *context,
1703   Thread_Control        *thread,
1704   Scheduler_Node        *node,
1705   Scheduler_SMP_Extract  extract_from_ready
1706 )
1707 {
1708   ISR_lock_Context lock_context;
1709 
1710   _Thread_Scheduler_acquire_critical( thread, &lock_context );
1711 
1712   if (
1713     thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1714       && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1715       && node->sticky_level == 1
1716   ) {
1717     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1718     ( *extract_from_ready )( context, node );
1719   }
1720 
1721   _Thread_Scheduler_release_critical( thread, &lock_context );
1722 }
1723 
/**
 * @brief Withdraws the node.
 *
 * @param context The scheduler context instance.
 * @param[in, out] thread The thread to change to @a next_state.
 * @param[in, out] node The node to withdraw.
 * @param next_state The new state for @a thread.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node of the scheduler.
 */
static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *cpu;

    _Assert( thread == _Scheduler_Node_get_user( node ) );
    /* Remember the processor before the thread state changes */
    cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Assert( _Scheduler_Node_get_user( node ) == thread );
    _Assert( _Scheduler_Node_get_idle( node ) == NULL );

    /* The freed processor goes to the highest ready node */
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  } else {
    /* A blocked node is in no queue; nothing to extract */
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}
1792 
1793 /**
1794  * @brief Makes the node sticky.
1795  *
1796  * @param scheduler is the scheduler of the node.
1797  *
1798  * @param[in, out] the_thread is the thread owning the node.
1799  *
1800  * @param[in, out] node is the scheduler node to make sticky.
 *
 * @param update is the function to update the priority of a node in the
 *   ready queue of the scheduler context.
 *
 * @param enqueue is the function to enqueue a node with a given priority
 *   into the ready queue of the scheduler context.
1801  */
1802 static inline void _Scheduler_SMP_Make_sticky(
1803   const Scheduler_Control *scheduler,
1804   Thread_Control          *the_thread,
1805   Scheduler_Node          *node,
1806   Scheduler_SMP_Update     update,
1807   Scheduler_SMP_Enqueue    enqueue
1808 )
1809 {
1810   Scheduler_SMP_Node_state node_state;
1811 
1812   node_state = _Scheduler_SMP_Node_state( node );
1813 
       /*
        * Only a blocked node has to be inserted; a scheduled or ready node is
        * presumably already tracked by the scheduler context — TODO confirm
        * against the callers of this operation.
        */
1814   if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1815     Scheduler_Context *context;
1816     Priority_Control   insert_priority;
1817     Priority_Control   priority;
1818 
1819     context = _Scheduler_Get_context( scheduler );
1820     priority = _Scheduler_Node_get_priority( node );
1821     priority = SCHEDULER_PRIORITY_PURIFY( priority );
1822 
         /*
          * Synchronize the cached SMP node priority with the current node
          * priority before the node is enqueued.
          */
1823     if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
1824       ( *update )( context, node, priority );
1825     }
1826 
         /* Append behind equal-priority nodes; the enqueue result is unused. */
1827     insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
1828     (void) ( *enqueue )( context, node, insert_priority );
1829   }
1830 }
1831 
1832 /**
1833  * @brief Cleans the sticky property from the node.
1834  *
1835  * @param scheduler is the scheduler of the node.
1836  *
1837  * @param[in, out] the_thread is the thread owning the node.
1838  *
1839  * @param[in, out] node is the scheduler node to clean the sticky property.
 *
 * @param extract_from_scheduled is the function to extract a node from the
 *   set of scheduled nodes.
 *
 * @param extract_from_ready is the function to extract a node from the ready
 *   queue of the scheduler context.
 *
 * @param get_highest_ready is the function to get the highest ready node.
 *
 * @param move_from_ready_to_scheduled is the function to move a node from the
 *   set of ready nodes to the set of scheduled nodes.
 *
 * @param allocate_processor is the function to allocate a processor to a node
 *   based on the rules of the scheduler.
 *
 * @param get_idle_node is the function to get an idle node from the scheduler
 *   context.
 *
 * @param release_idle_node is the function to release an idle node to the
 *   scheduler context.
1840  */
1841 static inline void _Scheduler_SMP_Clean_sticky(
1842   const Scheduler_Control          *scheduler,
1843   Thread_Control                   *the_thread,
1844   Scheduler_Node                   *node,
1845   Scheduler_SMP_Extract             extract_from_scheduled,
1846   Scheduler_SMP_Extract             extract_from_ready,
1847   Scheduler_SMP_Get_highest_ready   get_highest_ready,
1848   Scheduler_SMP_Move                move_from_ready_to_scheduled,
1849   Scheduler_SMP_Allocate_processor  allocate_processor,
1850   Scheduler_Get_idle_node           get_idle_node,
1851   Scheduler_Release_idle_node       release_idle_node
1852 )
1853 {
1854   Scheduler_SMP_Node_state node_state;
1855 
1856   node_state = _Scheduler_SMP_Node_state( node );
1857 
1858   if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1859     Thread_Control *idle;
1860 
1861     idle = _Scheduler_Node_get_idle( node );
1862 
         /*
          * Only act if an idle thread currently executes on behalf of this
          * scheduled node; otherwise there is no sticky state to clean up.
          */
1863     if ( idle != NULL ) {
1864       Scheduler_Context *context;
1865 
1866       context = _Scheduler_Get_context( scheduler );
1867 
           /*
            * Give the idle thread back to the scheduler context, then fill the
            * processor it occupied with the highest ready node.
            */
1868       _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
1869       _Scheduler_SMP_Schedule_highest_ready(
1870         context,
1871         node,
1872         _Thread_Get_CPU( idle ),
1873         extract_from_scheduled,
1874         extract_from_ready,
1875         get_highest_ready,
1876         move_from_ready_to_scheduled,
1877         allocate_processor,
1878         get_idle_node
1879       );
1880     }
1881   }
1882 }
1883 
1884 /**
1885  * @brief Starts the idle thread on the given processor.
1886  *
1887  * @param context The scheduler context instance.
1888  * @param[in, out] idle The idle thread to schedule.
1889  * @param cpu The processor for the idle thread.
1890  * @param register_idle Function to register the idle thread for a cpu.
1891  */
1892 static inline void _Scheduler_SMP_Do_start_idle(
1893   Scheduler_Context           *context,
1894   Thread_Control              *idle,
1895   Per_CPU_Control             *cpu,
1896   Scheduler_SMP_Register_idle  register_idle
1897 )
1898 {
1899   Scheduler_SMP_Context *self;
1900   Scheduler_SMP_Node    *node;
1901 
1902   self = _Scheduler_SMP_Get_self( context );
1903   node = _Scheduler_SMP_Thread_get_node( idle );
1904 
       /* Mark both the idle thread and its scheduler node as scheduled. */
1905   _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
1906   node->state = SCHEDULER_SMP_NODE_SCHEDULED;
1907 
       /*
        * Bind the idle thread to its processor, let the scheduler register it,
        * and track its node in the set of scheduled nodes.
        */
1908   _Thread_Set_CPU( idle, cpu );
1909   ( *register_idle )( context, &node->Base, cpu );
1910   _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
1911 }
1912 
1913 /**
1914  * @brief Adds the idle thread to the processor.
1915  *
1916  * @param context The scheduler context instance.
1917  * @param[in, out] idle The idle thread to add to the processor.
1918  * @param has_ready Function that checks if a given context has ready threads.
1919  * @param enqueue_scheduled Function to enqueue a scheduled node.
1920  * @param register_idle Function to register the idle thread for a cpu.
1921  */
1922 static inline void _Scheduler_SMP_Add_processor(
1923   Scheduler_Context              *context,
1924   Thread_Control                 *idle,
1925   Scheduler_SMP_Has_ready         has_ready,
1926   Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
1927   Scheduler_SMP_Register_idle     register_idle
1928 )
1929 {
1930   Scheduler_SMP_Context *self;
1931   Scheduler_Node        *node;
1932 
1933   self = _Scheduler_SMP_Get_self( context );
       /* The idle thread starts out scheduled on the newly added processor. */
1934   idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1935   node = _Thread_Scheduler_get_home_node( idle );
1936   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
1937   ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
1938 
       /*
        * If ready threads exist, the idle node must compete with them: the
        * enqueue-scheduled operation lets a ready thread take over the new
        * processor.  Otherwise, simply track the idle node as scheduled.
        */
1939   if ( ( *has_ready )( &self->Base ) ) {
1940     Priority_Control insert_priority;
1941 
1942     insert_priority = _Scheduler_SMP_Node_priority( node );
1943     insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1944     ( *enqueue_scheduled )( &self->Base, node, insert_priority );
1945   } else {
1946     _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1947   }
1948 }
1949 
1950 /**
1951  * @brief Removes an idle thread from the processor.
1952  *
1953  * @param context The scheduler context instance.
1954  * @param cpu The processor to remove from.
1955  * @param extract_from_scheduled Function to extract a node from the set of
1956  *      scheduled nodes.
1957  * @param extract_from_ready Function to extract a node from the ready queue
1958  *      of the scheduler context.
1959  * @param enqueue Function to enqueue a node with a given priority.
 * @param get_idle_node Function to get an idle node from the scheduler
 *      context.
 * @param release_idle_node Function to release an idle node to the scheduler
 *      context.
1960  *
1961  * @return The idle thread of @a cpu.
1962  */
1963 static inline Thread_Control *_Scheduler_SMP_Remove_processor(
1964   Scheduler_Context          *context,
1965   Per_CPU_Control            *cpu,
1966   Scheduler_SMP_Extract       extract_from_scheduled,
1967   Scheduler_SMP_Extract       extract_from_ready,
1968   Scheduler_SMP_Enqueue       enqueue,
1969   Scheduler_Get_idle_node     get_idle_node,
1970   Scheduler_Release_idle_node release_idle_node
1971 )
1972 {
1973   Scheduler_SMP_Context *self;
1974   Chain_Node            *chain_node;
1975   Scheduler_Node        *victim_node;
1976   Thread_Control        *victim_user;
1977   Thread_Control        *victim_owner;
1978   Thread_Control        *idle;
1979 
1980   self = _Scheduler_SMP_Get_self( context );
1981   chain_node = _Chain_First( &self->Scheduled );
1982 
       /*
        * Find the scheduled node whose user executes on the processor to
        * remove.  The assertion documents that such a node must exist, since
        * every online processor owned by this scheduler executes some
        * scheduled node.
        */
1983   do {
1984     _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
1985     victim_node = (Scheduler_Node *) chain_node;
1986     victim_user = _Scheduler_Node_get_user( victim_node );
1987     chain_node = _Chain_Next( chain_node );
1988   } while ( _Thread_Get_CPU( victim_user ) != cpu );
1989 
1990   ( *extract_from_scheduled )( &self->Base, victim_node );
1991   victim_owner = _Scheduler_Node_get_owner( victim_node );
1992 
       /*
        * If a non-idle thread occupies the processor, displace it: put an idle
        * node on the processor via an exact allocation, then give the victim
        * node a chance to run elsewhere by enqueueing it as ready.
        */
1993   if ( !victim_owner->is_idle ) {
1994     Thread_Control  *victim_idle;
1995     Scheduler_Node  *idle_node;
1996     Priority_Control insert_priority;
1997 
1998     victim_idle = _Scheduler_Release_idle_thread_if_necessary(
1999       victim_node,
2000       release_idle_node,
2001       &self->Base
2002     );
2003     idle_node = ( *get_idle_node )( &self->Base );
2004     idle = _Scheduler_Node_get_owner( idle_node );
2005     _Scheduler_SMP_Preempt(
2006       &self->Base,
2007       idle_node,
2008       victim_node,
2009       victim_idle,
2010       _Scheduler_SMP_Allocate_processor_exact
2011     );
2012 
2013     _Assert( !_Chain_Is_empty( &self->Scheduled ) );
2014     insert_priority = _Scheduler_SMP_Node_priority( victim_node );
2015     insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
2016     ( *enqueue )( &self->Base, victim_node, insert_priority );
2017   } else {
         /* The processor already executed its own idle thread; just return it. */
2018     _Assert( victim_owner == victim_user );
2019     _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
2020     idle = victim_owner;
2021   }
2022 
2023   return idle;
2024 }
2025 
2026 /**
2027  * @brief Sets the affinity of the node.
2028  *
2029  * Also performs a reinsert into the queue the node is currently in.
2030  *
2031  * @param context The scheduler context instance.
2032  * @param thread The thread for the operation.
2033  * @param[in, out] node The node to set the affinity of.
2034  * @param arg The affinity for @a node.
2035  * @param set_affinity Function to set the affinity of a node.
2036  * @param extract_from_scheduled Function to extract a node from the set of
2037  *      scheduled nodes.
2038  * @param extract_from_ready Function to extract a node from the ready queue
2039  *      of the scheduler context.
2040  * @param get_highest_ready Function to get the highest ready node.
2041  * @param move_from_ready_to_scheduled Function to move a node from the set
2042  *      of ready nodes to the set of scheduled nodes.
2043  * @param enqueue Function to enqueue a node with a given priority.
2044  * @param allocate_processor Function to allocate a processor to a node
2045  *      based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node from the scheduler
 *      context.
 * @param release_idle_node Function to release an idle node to the scheduler
 *      context.
2046  */
2047 static inline void _Scheduler_SMP_Set_affinity(
2048   Scheduler_Context               *context,
2049   Thread_Control                  *thread,
2050   Scheduler_Node                  *node,
2051   void                            *arg,
2052   Scheduler_SMP_Set_affinity       set_affinity,
2053   Scheduler_SMP_Extract            extract_from_scheduled,
2054   Scheduler_SMP_Extract            extract_from_ready,
2055   Scheduler_SMP_Get_highest_ready  get_highest_ready,
2056   Scheduler_SMP_Move               move_from_ready_to_scheduled,
2057   Scheduler_SMP_Enqueue            enqueue,
2058   Scheduler_SMP_Allocate_processor allocate_processor,
2059   Scheduler_Get_idle_node          get_idle_node,
2060   Scheduler_Release_idle_node      release_idle_node
2061 )
2062 {
2063   Scheduler_SMP_Node_state node_state;
2064   Priority_Control         insert_priority;
2065 
2066   node_state = _Scheduler_SMP_Node_state( node );
       /* Reinsert at the back of its priority group after the affinity change. */
2067   insert_priority = _Scheduler_SMP_Node_priority( node );
2068   insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
2069 
2070   if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
         /*
          * A scheduled node must give up its processor first, since the new
          * affinity may exclude the processor it currently occupies.  The
          * vacated processor is filled with the highest ready node before the
          * node is re-enqueued under its new affinity.
          */
2071     ( *extract_from_scheduled )( context, node );
2072     _Scheduler_SMP_Preempt_and_schedule_highest_ready(
2073       context,
2074       node,
2075       extract_from_ready,
2076       get_highest_ready,
2077       move_from_ready_to_scheduled,
2078       allocate_processor,
2079       get_idle_node,
2080       release_idle_node
2081     );
2082     ( *set_affinity )( context, node, arg );
2083     ( *enqueue )( context, node, insert_priority );
2084   } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
         /* A ready node is simply extracted, updated, and reinserted. */
2085     ( *extract_from_ready )( context, node );
2086     ( *set_affinity )( context, node, arg );
2087     ( *enqueue )( context, node, insert_priority );
2088   } else {
         /* A blocked node is in no queue; only the affinity has to change. */
2089     _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
2090     ( *set_affinity )( context, node, arg );
2091   }
2092 }
2093 
2094 /** @} */
2095 
2096 #ifdef __cplusplus
2097 }
2098 #endif /* __cplusplus */
2099 
2100 #endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */