Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:52

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup ScoreSchedSmpValSmp
0007  */
0008 
0009 /*
0010  * Copyright (C) 2021, 2022 embedded brains GmbH & Co. KG
0011  *
0012  * Redistribution and use in source and binary forms, with or without
0013  * modification, are permitted provided that the following conditions
0014  * are met:
0015  * 1. Redistributions of source code must retain the above copyright
0016  *    notice, this list of conditions and the following disclaimer.
0017  * 2. Redistributions in binary form must reproduce the above copyright
0018  *    notice, this list of conditions and the following disclaimer in the
0019  *    documentation and/or other materials provided with the distribution.
0020  *
0021  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0022  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0023  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0024  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0025  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0026  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0027  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0028  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0029  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0030  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0031  * POSSIBILITY OF SUCH DAMAGE.
0032  */
0033 
0034 /*
0035  * This file is part of the RTEMS quality process and was automatically
0036  * generated.  If you find something that needs to be fixed or
0037  * worded better please post a report or patch to an RTEMS mailing list
0038  * or raise a bug report:
0039  *
0040  * https://www.rtems.org/bugs.html
0041  *
0042  * For information on updating and regenerating please refer to the How-To
0043  * section in the Software Requirements Engineering chapter of the
0044  * RTEMS Software Engineering manual.  The manual is provided as a part of
0045  * a release.  For development sources please refer to the online
0046  * documentation at:
0047  *
0048  * https://docs.rtems.org
0049  */
0050 
0051 #ifdef HAVE_CONFIG_H
0052 #include "config.h"
0053 #endif
0054 
0055 #include <rtems.h>
0056 #include <rtems/test-scheduler.h>
0057 #include <rtems/score/percpu.h>
0058 #include <rtems/score/schedulersmp.h>
0059 #include <rtems/score/threadimpl.h>
0060 
0061 #include "tx-support.h"
0062 
0063 #include <rtems/test.h>
0064 
0065 /**
0066  * @defgroup ScoreSchedSmpValSmp spec:/score/sched/smp/val/smp
0067  *
0068  * @ingroup TestsuitesValidationSmpOnly0
0069  *
0070  * @brief Tests SMP-specific scheduler behaviour.
0071  *
0072  * This test case performs the following actions:
0073  *
0074  * - Construct a system state in which a sticky thread is blocked while an idle
0075  *   thread executes on behalf of the thread.
0076  *
0077  *   - Block the sticky worker A while it uses an idle thread in the home
0078  *     scheduler.
0079  *
0080  *   - Clean up all used resources.
0081  *
0082  * - Construct a system state in which a thread is preempted while it is
0083  *   blocked.
0084  *
0085  *   - Block worker A and preempt it before the withdraw node operations are
0086  *     performed for worker A.
0087  *
0088  *   - Clean up all used resources.
0089  *
0090  * - Construct a system state in which a thread is rescheduled while it is not
0091  *   scheduled on another scheduler.
0092  *
0093  *   - Reschedule worker A by the home scheduler while worker A is not
0094  *     scheduled on another scheduler.
0095  *
0096  *   - Clean up all used resources.
0097  *
0098  * - Construct a system state in which an ask for help request is cancelled
0099  *   while it is processed on another processor.
0100  *
0101  *   - Unblock worker A.  It cannot be scheduled on its home scheduler.
0102  *     Intercept the ask for help request.  Block the worker A.  This will
0103  *     cancel the ask for help request.  Remove the request while the other
0104  *     processor tries to cancel the request.
0105  *
0106  *   - Clean up all used resources.
0107  *
0108  * - Construct a system state in which a scheduler tries to schedule a node
0109  *   whose owner thread is already scheduled during a block operation.
0110  *
0111  *   - Block the runner thread while the owner thread of the highest priority
0112  *     ready node is already scheduled.
0113  *
0114  *   - Clean up all used resources.
0115  *
0116  * - Construct a system state in which a scheduler tries to schedule a node
0117  *   whose owner thread is blocked during a block operation.
0118  *
0119  *   - Block the runner thread while the owner thread of the highest priority
0120  *     ready node is blocked.
0121  *
0122  *   - Clean up all used resources.
0123  *
0124  * - Construct a system state in which a scheduler tries to schedule a node
0125  *   whose owner thread is already scheduled during a set affinity operation.
0126  *
0127  *   - Set the affinity of the runner thread while the owner thread of the
0128  *     highest priority ready node is already scheduled.
0129  *
0130  *   - Clean up all used resources.
0131  *
0132  * - Construct a system state in which a scheduler tries to schedule a node
0133  *   whose owner thread is already scheduled during a set affinity operation
0134  *   while a sticky node is involved.
0135  *
0136  *   - Set the affinity of the runner thread while the owner thread of the
0137  *     highest priority ready node is already scheduled.
0138  *
0139  *   - Clean up all used resources.
0140  *
0141  * - Construct a system state in which a scheduler tries to schedule a node
0142  *   whose owner thread is blocked during a set affinity operation.
0143  *
0144  *   - Set the affinity of the runner thread while the owner thread of the
0145  *     highest priority ready node is blocked.
0146  *
0147  *   - Clean up all used resources.
0148  *
0149  * - Construct a system state in which a scheduler tries to schedule a node
0150  *   whose owner thread is blocked during a set affinity operation while a
0151  *   sticky node is involved.
0152  *
0153  *   - Set the affinity of the runner thread while the owner thread of the
0154  *     highest priority ready node is blocked.
0155  *
0156  *   - Clean up all used resources.
0157  *
0158  * - Construct a system state in which a scheduler tries to schedule a node
0159  *   whose owner thread is already scheduled during a set priority operation.
0160  *
0161  *   - Set the priority of the runner thread while the owner thread of the
0162  *     highest priority ready node is already scheduled.
0163  *
0164  *   - Clean up all used resources.
0165  *
0166  * - Construct a system state in which a scheduler tries to schedule a node
0167  *   whose owner thread is already scheduled during a set priority operation
0168  *   while a sticky node is involved.
0169  *
0170  *   - Set the priority of the runner thread while the owner thread of the
0171  *     highest priority ready node is already scheduled.
0172  *
0173  *   - Clean up all used resources.
0174  *
0175  * - Construct a system state in which a scheduler tries to schedule a node
0176  *   whose owner thread is blocked during a set priority operation.
0177  *
0178  *   - Set the priority of the runner thread while the owner thread of the
0179  *     highest priority ready node is blocked.
0180  *
0181  *   - Clean up all used resources.
0182  *
0183  * - Construct a system state in which a scheduler tries to schedule a node
0184  *   whose owner thread is already scheduled during a yield operation.
0185  *
0186  *   - Yield while the owner thread of the highest priority ready node is
0187  *     already scheduled.
0188  *
0189  *   - Clean up all used resources.
0190  *
0191  * - Construct a system state in which a scheduler tries to schedule a node
0192  *   whose owner thread is already scheduled during a yield operation while a
0193  *   sticky node is involved.
0194  *
0195  *   - Yield while the owner thread of the highest priority ready node is
0196  *     already scheduled.
0197  *
0198  *   - Clean up all used resources.
0199  *
0200  * - Construct a system state in which a scheduler tries to schedule a node
0201  *   whose owner thread is blocked during a yield operation.
0202  *
0203  *   - Yield while the owner thread of the highest priority ready node is
0204  *     blocked.
0205  *
0206  *   - Clean up all used resources.
0207  *
0208  * - Construct a system state in which a scheduler tries to schedule a node
0209  *   whose owner thread is blocked during a yield operation while a sticky node
0210  *   is involved.
0211  *
0212  *   - Yield while the owner thread of the highest priority ready node is
0213  *     blocked.
0214  *
0215  *   - Clean up all used resources.
0216  *
0217  * - Create three worker threads and a mutex.  Use the mutex and the worker to
0218  *   check that a not scheduled thread does not get removed from the set of
0219  *   ready threads of a scheduler when a help request is reconsidered.
0220  *
0221  *   - Prevent that worker B can perform a post-switch cleanup.
0222  *
0223  *   - Give worker C a lower priority than worker B.  Worker B will try to
0224  *     finish the thread dispatch by doing a post-switch cleanup.  The
0225  *     post-switch cleanup cannot progress since the runner owns the thread
0226  *     state lock.  Wait until the other processor waits on the thread state
0227  *     lock of worker B.
0228  *
0229  *   - Give worker C a higher priority than worker B.  Let worker B do its
0230  *     post-switch cleanup which will carry out the reconsider help requests
0231  *     for a not scheduled thread.
0232  *
0233  *   - Clean up all used resources.
0234  *
0235  * @{
0236  */
0237 
0238 typedef enum {
0239   WORKER_A,
0240   WORKER_B,
0241   WORKER_C,
0242   WORKER_COUNT
0243 } WorkerIndex;
0244 
0245 /**
0246  * @brief Test context for spec:/score/sched/smp/val/smp test case.
0247  */
0248 typedef struct {
0249   /**
0250    * @brief This member contains the runner identifier.
0251    */
0252   rtems_id runner_id;
0253 
0254   /**
0255    * @brief This member contains the worker identifiers.
0256    */
0257   rtems_id worker_id[ WORKER_COUNT ];
0258 
0259   /**
0260    * @brief This member contains the mutex identifier.
0261    */
0262   rtems_id mutex_id;
0263 
0264   /**
0265    * @brief This member contains the sticky mutex identifier.
0266    */
0267   rtems_id sticky_id;
0268 
0269   /**
0270    * @brief This member contains the worker busy status.
0271    */
0272   volatile bool busy[ WORKER_COUNT ];
0273 
0274   /**
0275    * @brief This member contains an ISR lock context.
0276    */
0277   ISR_lock_Context lock_context;
0278 
0279   /**
0280    * @brief This member contains a counter.
0281    */
0282   uint32_t counter;
0283 
0284   /**
0285    * @brief If this member is true, then the worker shall be in the busy loop.
0286    */
0287   volatile bool is_busy[ WORKER_COUNT ];
0288 
0289   /**
0290    * @brief This member contains the per-CPU jobs.
0291    */
0292   Per_CPU_Job job[ 2 ];
0293 
0294   /**
0295    * @brief This member contains the per-CPU job contexts.
0296    */
0297   Per_CPU_Job_context job_context[ 2 ];
0298 
0299   /**
0300    * @brief This member contains the call within ISR request.
0301    */
0302   CallWithinISRRequest request;
0303 } ScoreSchedSmpValSmp_Context;
0304 
0305 static ScoreSchedSmpValSmp_Context
0306   ScoreSchedSmpValSmp_Instance;
0307 
0308 #define EVENT_OBTAIN RTEMS_EVENT_0
0309 
0310 #define EVENT_RELEASE RTEMS_EVENT_1
0311 
0312 #define EVENT_STICKY_OBTAIN RTEMS_EVENT_2
0313 
0314 #define EVENT_STICKY_RELEASE RTEMS_EVENT_3
0315 
0316 #define EVENT_SYNC_RUNNER RTEMS_EVENT_4
0317 
0318 #define EVENT_BUSY RTEMS_EVENT_5
0319 
0320 typedef ScoreSchedSmpValSmp_Context Context;
0321 
0322 static void SendAndSync(
0323   Context        *ctx,
0324   WorkerIndex     worker,
0325   rtems_event_set event
0326 )
0327 {
0328   SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
0329   ReceiveAllEvents( EVENT_SYNC_RUNNER );
0330   WaitForExecutionStop( ctx->worker_id[ worker ] );
0331 }
0332 
0333 static void MakeBusy( Context *ctx, WorkerIndex worker )
0334 {
0335   ctx->is_busy[ worker ] = false;
0336   ctx->busy[ worker ] = true;
0337   SendEvents( ctx->worker_id[ worker ], EVENT_BUSY );
0338 }
0339 
0340 static void WaitForBusy( Context *ctx, WorkerIndex worker )
0341 {
0342   while ( !ctx->is_busy[ worker ] ) {
0343     /* Wait */
0344   }
0345 }
0346 
0347 static void StopBusy( Context *ctx, WorkerIndex worker )
0348 {
0349   ctx->busy[ worker ] = false;
0350   WaitForExecutionStop( ctx->worker_id[ worker ] );
0351 }
0352 
0353 static void MakeSticky( const Context *ctx )
0354 {
0355   ObtainMutex( ctx->sticky_id );
0356 }
0357 
0358 static void CleanSticky( const Context *ctx )
0359 {
0360   ReleaseMutex( ctx->sticky_id );
0361 }
0362 
0363 static void Block( void *arg )
0364 {
0365   Context *ctx;
0366 
0367   ctx = arg;
0368   SuspendTask( ctx->runner_id );
0369   ResumeTask( ctx->runner_id );
0370 }
0371 
0372 static void OperationStopBusyC(
0373   void                    *arg,
0374   const T_scheduler_event *event,
0375   T_scheduler_when         when,
0376   T_scheduler_operation    op
0377 )
0378 {
0379   Context *ctx;
0380 
0381   ctx = arg;
0382 
0383   if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
0384     T_scheduler_set_event_handler( NULL, NULL );
0385     StopBusy( ctx, WORKER_C );
0386   }
0387 }
0388 
0389 static void BlockStopBusyC(
0390   void                    *arg,
0391   const T_scheduler_event *event,
0392   T_scheduler_when         when
0393 )
0394 {
0395   OperationStopBusyC( arg, event, when, T_SCHEDULER_BLOCK );
0396 }
0397 
0398 static void SetAffinityStopBusyC(
0399   void                    *arg,
0400   const T_scheduler_event *event,
0401   T_scheduler_when         when
0402 )
0403 {
0404   OperationStopBusyC( arg, event, when, T_SCHEDULER_SET_AFFINITY );
0405 }
0406 
0407 static void UpdatePriorityStopBusyC(
0408   void                    *arg,
0409   const T_scheduler_event *event,
0410   T_scheduler_when         when
0411 )
0412 {
0413   OperationStopBusyC( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
0414 }
0415 
0416 static void YieldStopBusyC(
0417   void                    *arg,
0418   const T_scheduler_event *event,
0419   T_scheduler_when         when
0420 )
0421 {
0422   OperationStopBusyC( arg, event, when, T_SCHEDULER_YIELD );
0423 }
0424 
0425 static void SuspendA( void *arg )
0426 {
0427   Context *ctx;
0428 
0429   ctx = arg;
0430   SuspendTask( ctx->worker_id[ WORKER_A ] );
0431 }
0432 
0433 static void OperationSuspendA(
0434   void                    *arg,
0435   const T_scheduler_event *event,
0436   T_scheduler_when         when,
0437   T_scheduler_operation    op
0438 )
0439 {
0440   Context *ctx;
0441 
0442   ctx = arg;
0443 
0444   if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
0445     const rtems_tcb *worker_a;
0446 
0447     T_scheduler_set_event_handler( NULL, NULL );
0448     ctx->job_context[ 0 ].handler = SuspendA;
0449     _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0450 
0451     worker_a = GetThread( ctx->worker_id[ WORKER_A ] );
0452 
0453     while ( worker_a->Scheduler.state != THREAD_SCHEDULER_BLOCKED ) {
0454       RTEMS_COMPILER_MEMORY_BARRIER();
0455     }
0456   }
0457 }
0458 
0459 static void BlockSuspendA(
0460   void                    *arg,
0461   const T_scheduler_event *event,
0462   T_scheduler_when         when
0463 )
0464 {
0465   OperationSuspendA( arg, event, when, T_SCHEDULER_BLOCK );
0466 }
0467 
0468 static void SetAffinitySuspendA(
0469   void                    *arg,
0470   const T_scheduler_event *event,
0471   T_scheduler_when         when
0472 )
0473 {
0474   OperationSuspendA( arg, event, when, T_SCHEDULER_SET_AFFINITY );
0475 }
0476 
0477 static void UpdatePrioritySuspendA(
0478   void                    *arg,
0479   const T_scheduler_event *event,
0480   T_scheduler_when         when
0481 )
0482 {
0483   OperationSuspendA( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
0484 }
0485 
0486 static void YieldSuspendA(
0487   void                    *arg,
0488   const T_scheduler_event *event,
0489   T_scheduler_when         when
0490 )
0491 {
0492   OperationSuspendA( arg, event, when, T_SCHEDULER_YIELD );
0493 }
0494 
0495 static void GuideAskForHelp( void *arg )
0496 {
0497   Context         *ctx;
0498   Per_CPU_Control *cpu;
0499   ISR_lock_Context lock_context;
0500 
0501   ctx = arg;
0502   cpu = _Per_CPU_Get_by_index( 0 );
0503 
0504   _ISR_lock_ISR_disable( &lock_context );
0505   _Per_CPU_Acquire( cpu, &lock_context );
0506 
0507   ISRLockWaitForOthers( &cpu->Lock, 1 );
0508 
0509   ctx->job_context[ 0 ].handler = SuspendA;
0510   _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0511   ISRLockWaitForOthers( &cpu->Lock, 2 );
0512 
0513   _Per_CPU_Release( cpu, &lock_context );
0514   _ISR_lock_ISR_enable( &lock_context );
0515 }
0516 
0517 static void InterceptAskForHelp( void *arg )
0518 {
0519   Context         *ctx;
0520   Per_CPU_Control *cpu_self;
0521 
0522   ctx = arg;
0523   cpu_self = _Per_CPU_Get();
0524 
0525   if ( rtems_scheduler_get_processor_maximum() > 2 ) {
0526     ctx->job_context[ 1 ].handler = GuideAskForHelp;
0527     _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 2 ), &ctx->job[ 1 ] );
0528     ISRLockWaitForOwned( &cpu_self->Lock );
0529   } else {
0530     ISR_lock_Context lock_context;
0531     Chain_Node      *node;
0532     Thread_Control  *thread;
0533 
0534     _ISR_lock_ISR_disable( &lock_context );
0535     _Per_CPU_Acquire( cpu_self, &lock_context );
0536     ctx->job_context[ 0 ].handler = SuspendA;
0537     _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0538     ISRLockWaitForOthers( &cpu_self->Lock, 1 );
0539 
0540     /* See _Thread_Preemption_intervention() */
0541     node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
0542     thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
0543     T_assert_eq_ptr( thread, GetThread( ctx->worker_id[ WORKER_A ] ) );
0544     thread->Scheduler.ask_for_help_cpu = NULL;
0545 
0546     _Per_CPU_Release( cpu_self, &lock_context );
0547     _ISR_lock_ISR_enable( &lock_context );
0548   }
0549 }
0550 
0551 static void UnblockAskForHelp(
0552   void                    *arg,
0553   const T_scheduler_event *event,
0554   T_scheduler_when         when
0555 )
0556 {
0557   Context *ctx;
0558 
0559   ctx = arg;
0560 
0561   if (
0562     when == T_SCHEDULER_BEFORE &&
0563     event->operation == T_SCHEDULER_UNBLOCK
0564   ) {
0565     T_scheduler_set_event_handler( NULL, NULL );
0566     ctx->request.handler = InterceptAskForHelp;
0567     ctx->request.arg = ctx;
0568     CallWithinISRSubmit( &ctx->request );
0569   }
0570 }
0571 
0572 static void RaiseWorkerPriorityWithIdleRunner( void *arg )
0573 {
0574   Context *ctx;
0575 
0576   ctx = arg;
0577   SuspendTask( ctx->runner_id );
0578   T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
0579   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0580   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
0581   ResumeTask( ctx->runner_id );
0582 }
0583 
0584 static void MakeReady( void *arg )
0585 {
0586   Context *ctx;
0587 
0588   ctx = arg;
0589   MakeBusy( ctx, WORKER_C );
0590 }
0591 
0592 static void UpdatePriorityMakeReady(
0593   void                    *arg,
0594   const T_scheduler_event *event,
0595   T_scheduler_when         when
0596 )
0597 {
0598   Context *ctx;
0599 
0600   ctx = arg;
0601 
0602   if (
0603     when == T_SCHEDULER_BEFORE &&
0604     event->operation == T_SCHEDULER_UPDATE_PRIORITY
0605   ) {
0606     Thread_Control  *thread;
0607 
0608     T_scheduler_set_event_handler( NULL, NULL );
0609 
0610     thread = GetThread( ctx->worker_id[ WORKER_A ] );
0611     T_eq_int( thread->Scheduler.state, THREAD_SCHEDULER_SCHEDULED );
0612 
0613     ctx->job_context[ 0 ].handler = MakeReady;
0614     _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0615 
0616     while ( thread->Scheduler.state != THREAD_SCHEDULER_READY ) {
0617       RTEMS_COMPILER_MEMORY_BARRIER();
0618     }
0619   }
0620 }
0621 
0622 static void ReadyToScheduled( void *arg )
0623 {
0624   Context *ctx;
0625 
0626   ctx = arg;
0627   SuspendTask( ctx->runner_id );
0628 
0629   T_scheduler_set_event_handler( UpdatePriorityMakeReady, ctx );
0630   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0631 
0632   SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
0633   ResumeTask( ctx->runner_id );
0634 }
0635 
0636 static void BlockAndReuseIdle( void *arg )
0637 {
0638   Context *ctx;
0639 
0640   ctx = arg;
0641   SuspendTask( ctx->runner_id );
0642   SuspendTask( ctx->worker_id[ WORKER_A ] );
0643   ResumeTask( ctx->worker_id[ WORKER_A ] );
0644   SetPriority( ctx->runner_id, PRIO_HIGH );
0645   ResumeTask( ctx->runner_id );
0646 }
0647 
0648 static void Preempt( void *arg )
0649 {
0650   Context *ctx;
0651 
0652   ctx = arg;
0653   MakeBusy( ctx, WORKER_C );
0654 }
0655 
0656 static void BlockAndPreempt(
0657   void                    *arg,
0658   const T_scheduler_event *event,
0659   T_scheduler_when         when
0660 )
0661 {
0662   Context *ctx;
0663 
0664   ctx = arg;
0665 
0666   if ( when == T_SCHEDULER_AFTER && event->operation == T_SCHEDULER_BLOCK ) {
0667     Thread_Control  *thread;
0668 
0669     T_scheduler_set_event_handler( NULL, NULL );
0670 
0671     thread = GetThread( ctx->worker_id[ WORKER_A ] );
0672     T_eq_int( thread->Scheduler.state, THREAD_SCHEDULER_BLOCKED );
0673 
0674     ctx->job_context[ 0 ].handler = Preempt;
0675     _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0676     _Per_CPU_Wait_for_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
0677   }
0678 }
0679 
0680 static void PrepareOwnerScheduled( Context *ctx )
0681 {
0682   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
0683   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
0684   SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
0685   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
0686   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
0687   MakeBusy( ctx, WORKER_C );
0688   WaitForBusy( ctx, WORKER_C );
0689   MakeBusy( ctx, WORKER_A );
0690 }
0691 
0692 static void CleanupOwnerScheduled( Context *ctx )
0693 {
0694   StopBusy( ctx, WORKER_A );
0695   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0696   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
0697   SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
0698   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
0699   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
0700 }
0701 
0702 static void PrepareOwnerBlocked( Context *ctx )
0703 {
0704   SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_B_ID, PRIO_NORMAL );
0705   SendAndSync( ctx, WORKER_A, EVENT_OBTAIN );
0706   SendEvents( ctx->worker_id[ WORKER_B ], EVENT_OBTAIN );
0707   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
0708   MakeBusy( ctx, WORKER_C );
0709   SetPriority( ctx->worker_id[ WORKER_B ], PRIO_LOW );
0710   MakeBusy( ctx, WORKER_A );
0711   SetPriority( ctx->worker_id[ WORKER_B ], PRIO_NORMAL );
0712 }
0713 
0714 static void CleanupOwnerBlocked( Context *ctx )
0715 {
0716   StopBusy( ctx, WORKER_C );
0717   ResumeTask( ctx->worker_id[ WORKER_A ] );
0718   StopBusy( ctx, WORKER_A );
0719   SendAndSync( ctx, WORKER_A, EVENT_RELEASE );
0720   SetPriority( ctx->worker_id[ WORKER_B ], PRIO_HIGH );
0721   SendEvents( ctx->worker_id[ WORKER_B ], EVENT_RELEASE );
0722   SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_A_ID, PRIO_HIGH );
0723   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
0724 }
0725 
0726 static void ReconsiderHelpRequestB(
0727   void                    *arg,
0728   const T_scheduler_event *event,
0729   T_scheduler_when         when
0730 )
0731 {
0732   Context *ctx;
0733 
0734   (void) when;
0735   ctx = arg;
0736 
0737   if ( event->operation == T_SCHEDULER_RECONSIDER_HELP_REQUEST ) {
0738     Scheduler_SMP_Node *node;
0739 
0740     node = (Scheduler_SMP_Node *) event->node;
0741     T_eq_int( node->state, SCHEDULER_SMP_NODE_READY );
0742     ++ctx->counter;
0743   }
0744 }
0745 
0746 static void ReleaseThreadLockB(
0747   void                    *arg,
0748   const T_scheduler_event *event,
0749   T_scheduler_when         when
0750 )
0751 {
0752   Context *ctx;
0753 
0754   ctx = arg;
0755 
0756   if (
0757     when == T_SCHEDULER_AFTER &&
0758     event->operation == T_SCHEDULER_UPDATE_PRIORITY
0759   ) {
0760     Thread_Control *worker_b;
0761 
0762     T_scheduler_set_event_handler( ReconsiderHelpRequestB, ctx );
0763 
0764     worker_b = GetThread( ctx->worker_id[ WORKER_B ] );
0765     T_eq_int( worker_b->Scheduler.state, THREAD_SCHEDULER_READY );
0766 
0767     _Thread_State_release_critical( worker_b, &ctx->lock_context );
0768   }
0769 }
0770 
0771 static void Worker( rtems_task_argument arg, WorkerIndex worker )
0772 {
0773   Context *ctx;
0774 
0775   ctx = (Context *) arg;
0776 
0777   while ( true ) {
0778     rtems_event_set events;
0779 
0780     events = ReceiveAnyEvents();
0781 
0782     if ( ( events & EVENT_SYNC_RUNNER ) != 0 ) {
0783       SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
0784     }
0785 
0786     if ( ( events & EVENT_OBTAIN ) != 0 ) {
0787       ObtainMutex( ctx->mutex_id );
0788     }
0789 
0790     if ( ( events & EVENT_RELEASE ) != 0 ) {
0791       ReleaseMutex( ctx->mutex_id );
0792     }
0793 
0794     if ( ( events & EVENT_STICKY_OBTAIN ) != 0 ) {
0795       ObtainMutex( ctx->sticky_id );
0796     }
0797 
0798     if ( ( events & EVENT_STICKY_RELEASE ) != 0 ) {
0799       ReleaseMutex( ctx->sticky_id );
0800     }
0801 
0802     if ( ( events & EVENT_BUSY ) != 0 ) {
0803       ctx->is_busy[ worker ] = true;
0804 
0805       while ( ctx->busy[ worker ] ) {
0806         /* Wait */
0807       }
0808 
0809       ctx->is_busy[ worker ] = false;
0810     }
0811   }
0812 }
0813 
0814 static void WorkerA( rtems_task_argument arg )
0815 {
0816   Worker( arg, WORKER_A );
0817 }
0818 
0819 static void WorkerB( rtems_task_argument arg )
0820 {
0821   Worker( arg, WORKER_B );
0822 }
0823 
0824 static void WorkerC( rtems_task_argument arg )
0825 {
0826   Worker( arg, WORKER_C );
0827 }
0828 
0829 static void ScoreSchedSmpValSmp_Setup( ScoreSchedSmpValSmp_Context *ctx )
0830 {
0831   rtems_status_code sc;
0832   size_t            i;
0833 
0834   ctx->runner_id = rtems_task_self();
0835   ctx->mutex_id = CreateMutex();
0836 
0837   for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->job ); ++i ) {
0838     ctx->job_context[ i ].arg = ctx;
0839     ctx->job[ i ].context = &ctx->job_context[ i ];
0840   }
0841 
0842   sc = rtems_semaphore_create(
0843     rtems_build_name( 'S', 'T', 'K', 'Y' ),
0844     1,
0845     RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
0846       RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
0847     PRIO_NORMAL,
0848     &ctx->sticky_id
0849   );
0850   T_rsc_success( sc );
0851 
0852   SetSelfPriority( PRIO_NORMAL );
0853 
0854   ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
0855   StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
0856 
0857   ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
0858   StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
0859 
0860   ctx->worker_id[ WORKER_C ] = CreateTask( "WRKC", PRIO_HIGH );
0861   StartTask( ctx->worker_id[ WORKER_C ], WorkerC, ctx );
0862 }
0863 
0864 static void ScoreSchedSmpValSmp_Setup_Wrap( void *arg )
0865 {
0866   ScoreSchedSmpValSmp_Context *ctx;
0867 
0868   ctx = arg;
0869   ScoreSchedSmpValSmp_Setup( ctx );
0870 }
0871 
0872 static void ScoreSchedSmpValSmp_Teardown( ScoreSchedSmpValSmp_Context *ctx )
0873 {
0874   DeleteTask( ctx->worker_id[ WORKER_A ] );
0875   DeleteTask( ctx->worker_id[ WORKER_B ] );
0876   DeleteTask( ctx->worker_id[ WORKER_C ] );
0877   DeleteMutex( ctx->mutex_id );
0878   DeleteMutex( ctx->sticky_id );
0879   RestoreRunnerPriority();
0880 }
0881 
0882 static void ScoreSchedSmpValSmp_Teardown_Wrap( void *arg )
0883 {
0884   ScoreSchedSmpValSmp_Context *ctx;
0885 
0886   ctx = arg;
0887   ScoreSchedSmpValSmp_Teardown( ctx );
0888 }
0889 
0890 static T_fixture ScoreSchedSmpValSmp_Fixture = {
0891   .setup = ScoreSchedSmpValSmp_Setup_Wrap,
0892   .stop = NULL,
0893   .teardown = ScoreSchedSmpValSmp_Teardown_Wrap,
0894   .scope = NULL,
0895   .initial_context = &ScoreSchedSmpValSmp_Instance
0896 };
0897 
0898 /**
0899  * @brief Construct a system state in which a sticky thread is blocked while an
0900  *   idle thread executes on behalf of the thread.
0901  */
0902 static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
0903 {
0904   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
0905   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
0906   SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
0907   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
0908   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_STICKY_OBTAIN );
0909   MakeBusy( ctx, WORKER_A );
0910   WaitForBusy( ctx, WORKER_A );
0911 
0912   /*
0913    * Block the sticky worker A while it uses an idle thread in the home
0914    * scheduler.
0915    */
0916   CallWithinISR( BlockAndReuseIdle, ctx );
0917 
0918   /*
0919    * Clean up all used resources.
0920    */
0921   StopBusy( ctx, WORKER_A );
0922   SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
0923   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0924   SetSelfPriority( PRIO_NORMAL );
0925   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
0926   SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
0927   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
0928 }
0929 
0930 /**
0931  * @brief Construct a system state in which a thread is preempted while it is
0932  *   blocked.
0933  */
0934 static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
0935 {
0936   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
0937   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
0938   SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
0939   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
0940   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
0941   MakeBusy( ctx, WORKER_A );
0942   WaitForBusy( ctx, WORKER_A );
0943 
0944   /*
0945    * Block worker A and preempt it before the withdraw node operations are
0946    * performed for worker A.
0947    */
0948   T_scheduler_set_event_handler( BlockAndPreempt, ctx );
0949   SuspendTask( ctx->worker_id[ WORKER_A ] );
0950 
0951   /*
0952    * Clean up all used resources.
0953    */
0954   ResumeTask( ctx->worker_id[ WORKER_A ] );
0955   StopBusy( ctx, WORKER_C );
0956   StopBusy( ctx, WORKER_A );
0957   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0958   SetSelfPriority( PRIO_NORMAL );
0959   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
0960   SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
0961   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
0962   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
0963 }
0964 
0965 /**
0966  * @brief Construct a system state in which a thread is rescheduled  while it
0967  *   is not scheduled on another scheduler.
0968  */
0969 static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
0970 {
0971   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
0972   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
0973   SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
0974   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
0975   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
0976   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_STICKY_OBTAIN );
0977   MakeBusy( ctx, WORKER_A );
0978   WaitForBusy( ctx, WORKER_A );
0979 
0980   /*
0981    * Reschedule worker A by the home scheduler while worker A is not scheduled
0982    * on another scheduler.
0983    */
0984   CallWithinISR( ReadyToScheduled, ctx );
0985 
0986   /*
0987    * Clean up all used resources.
0988    */
0989   StopBusy( ctx, WORKER_C );
0990   StopBusy( ctx, WORKER_A );
0991   SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
0992   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
0993   SetSelfPriority( PRIO_NORMAL );
0994   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
0995   SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
0996   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
0997   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
0998 }
0999 
1000 /**
1001  * @brief Construct a system state in which an ask for help request is
1002  *   cancelled while it is processed on another processor.
1003  */
1004 static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
1005 {
1006   PrepareOwnerScheduled( ctx );
1007 
1008   /*
1009    * Unblock worker A.  It cannot be scheduled on its home scheduler. Intercept
1010    * the ask for help request.  Block the worker A.  This will cancel the ask
1011    * for help request.  Remove the request while the other processor tries to
1012    * cancel the request.
1013    */
1014   SuspendTask( ctx->worker_id[ WORKER_A ] );
1015   T_scheduler_set_event_handler( UnblockAskForHelp, ctx );
1016   ResumeTask( ctx->worker_id[ WORKER_A ] );
1017 
1018   /*
1019    * Clean up all used resources.
1020    */
1021   ResumeTask( ctx->worker_id[ WORKER_A ] );
1022   StopBusy( ctx, WORKER_C );
1023   CleanupOwnerScheduled( ctx );
1024 }
1025 
1026 /**
1027  * @brief Construct a system state in which a scheduler tries to schedule a
1028  *   node those owner thread is already scheduled during a block operation.
1029  */
1030 static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
1031 {
1032   PrepareOwnerScheduled( ctx );
1033 
1034   /*
1035    * Block the runner thread while the owner thread of the highest priority
1036    * ready node is already scheduled.
1037    */
1038   T_scheduler_set_event_handler( BlockStopBusyC, ctx );
1039   CallWithinISR( Block, ctx );
1040 
1041   /*
1042    * Clean up all used resources.
1043    */
1044   CleanupOwnerScheduled( ctx );
1045 }
1046 
1047 /**
1048  * @brief Construct a system state in which a scheduler tries to schedule a
1049  *   node those owner thread is blocked during a block operation.
1050  */
1051 static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
1052 {
1053   PrepareOwnerBlocked( ctx );
1054 
1055   /*
1056    * Block the runner thread while the owner thread of the highest priority
1057    * ready node is blocked.
1058    */
1059   T_scheduler_set_event_handler( BlockSuspendA, ctx );
1060   CallWithinISR( Block, ctx );
1061 
1062   /*
1063    * Clean up all used resources.
1064    */
1065   CleanupOwnerBlocked( ctx );
1066 }
1067 
1068 /**
1069  * @brief Construct a system state in which a scheduler tries to schedule a
1070  *   node those owner thread is already scheduled during a set affinity
1071  *   operation.
1072  */
1073 static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
1074 {
1075   PrepareOwnerScheduled( ctx );
1076 
1077   /*
1078    * Set the affinity of the runner thread while the owner thread of the
1079    * highest priority ready node is already scheduled.
1080    */
1081   T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
1082   SetSelfAffinityAll();
1083 
1084   /*
1085    * Clean up all used resources.
1086    */
1087   CleanupOwnerScheduled( ctx );
1088 }
1089 
1090 /**
1091  * @brief Construct a system state in which a scheduler tries to schedule a
1092  *   node those owner thread is already scheduled during a set affinity
1093  *   operation while a sticky node is involved.
1094  */
1095 static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
1096 {
1097   PrepareOwnerScheduled( ctx );
1098 
1099   /*
1100    * Set the affinity of the runner thread while the owner thread of the
1101    * highest priority ready node is already scheduled.
1102    */
1103   MakeSticky( ctx );
1104   T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
1105   SetSelfAffinityAll();
1106   CleanSticky( ctx );
1107 
1108   /*
1109    * Clean up all used resources.
1110    */
1111   CleanupOwnerScheduled( ctx );
1112 }
1113 
1114 /**
1115  * @brief Construct a system state in which a scheduler tries to schedule a
1116  *   node those owner thread is blocked during a set affinity operation.
1117  */
1118 static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
1119 {
1120   PrepareOwnerBlocked( ctx );
1121 
1122   /*
1123    * Set the affinity of the runner thread while the owner thread of the
1124    * highest priority ready node is blocked.
1125    */
1126   T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
1127   SetSelfAffinityAll();
1128 
1129   /*
1130    * Clean up all used resources.
1131    */
1132   CleanupOwnerBlocked( ctx );
1133 }
1134 
1135 /**
1136  * @brief Construct a system state in which a scheduler tries to schedule a
1137  *   node those owner thread is blocked during a set affinity operation while a
1138  *   sticky node is involved.
1139  */
1140 static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
1141 {
1142   PrepareOwnerBlocked( ctx );
1143 
1144   /*
1145    * Set the affinity of the runner thread while the owner thread of the
1146    * highest priority ready node is blocked.
1147    */
1148   MakeSticky( ctx );
1149   T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
1150   SetSelfAffinityAll();
1151   CleanSticky( ctx );
1152 
1153   /*
1154    * Clean up all used resources.
1155    */
1156   CleanupOwnerBlocked( ctx );
1157 }
1158 
1159 /**
1160  * @brief Construct a system state in which a scheduler tries to schedule a
1161  *   node those owner thread is already scheduled during a set priority
1162  *   operation.
1163  */
1164 static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
1165 {
1166   PrepareOwnerScheduled( ctx );
1167 
1168   /*
1169    * Set the priority of the runner thread while the owner thread of the
1170    * highest priority ready node is already scheduled.
1171    */
1172   SetSelfPriority( PRIO_HIGH );
1173   T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
1174   SetSelfPriority( PRIO_NORMAL );
1175 
1176   /*
1177    * Clean up all used resources.
1178    */
1179   CleanupOwnerScheduled( ctx );
1180 }
1181 
1182 /**
1183  * @brief Construct a system state in which a scheduler tries to schedule a
1184  *   node those owner thread is already scheduled during a set priority
1185  *   operation while a sticky node is involved.
1186  */
1187 static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
1188 {
1189   PrepareOwnerScheduled( ctx );
1190 
1191   /*
1192    * Set the priority of the runner thread while the owner thread of the
1193    * highest priority ready node is already scheduled.
1194    */
1195   MakeSticky( ctx );
1196   CallWithinISR( RaiseWorkerPriorityWithIdleRunner, ctx );
1197   CleanSticky( ctx );
1198 
1199   /*
1200    * Clean up all used resources.
1201    */
1202   CleanupOwnerScheduled( ctx );
1203 }
1204 
1205 /**
1206  * @brief Construct a system state in which a scheduler tries to schedule a
1207  *   node those owner thread is blocked during a set priority operation.
1208  */
1209 static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
1210 {
1211   PrepareOwnerBlocked( ctx );
1212 
1213   /*
1214    * Set the priority of the runner thread while the owner thread of the
1215    * highest priority ready node is blocked.
1216    */
1217   SetSelfPriority( PRIO_HIGH );
1218   T_scheduler_set_event_handler( UpdatePrioritySuspendA, ctx );
1219   SetSelfPriority( PRIO_NORMAL );
1220 
1221   /*
1222    * Clean up all used resources.
1223    */
1224   CleanupOwnerBlocked( ctx );
1225 }
1226 
1227 /**
1228  * @brief Construct a system state in which a scheduler tries to schedule a
1229  *   node those owner thread is already scheduled during a yield operation.
1230  */
1231 static void ScoreSchedSmpValSmp_Action_13( ScoreSchedSmpValSmp_Context *ctx )
1232 {
1233   PrepareOwnerScheduled( ctx );
1234 
1235   /*
1236    * Yield while the owner thread of the highest priority ready node is already
1237    * scheduled.
1238    */
1239   T_scheduler_set_event_handler( YieldStopBusyC, ctx );
1240   Yield();
1241 
1242   /*
1243    * Clean up all used resources.
1244    */
1245   CleanupOwnerScheduled( ctx );
1246 }
1247 
1248 /**
1249  * @brief Construct a system state in which a scheduler tries to schedule a
1250  *   node those owner thread is already scheduled during a yield operation
1251  *   while a sticky node is involved.
1252  */
1253 static void ScoreSchedSmpValSmp_Action_14( ScoreSchedSmpValSmp_Context *ctx )
1254 {
1255   PrepareOwnerScheduled( ctx );
1256 
1257   /*
1258    * Yield while the owner thread of the highest priority ready node is already
1259    * scheduled.
1260    */
1261   MakeSticky( ctx );
1262   T_scheduler_set_event_handler( YieldStopBusyC, ctx );
1263   Yield();
1264   CleanSticky( ctx );
1265 
1266   /*
1267    * Clean up all used resources.
1268    */
1269   CleanupOwnerScheduled( ctx );
1270 }
1271 
1272 /**
1273  * @brief Construct a system state in which a scheduler tries to schedule a
1274  *   node those owner thread is blocked during a yield operation.
1275  */
1276 static void ScoreSchedSmpValSmp_Action_15( ScoreSchedSmpValSmp_Context *ctx )
1277 {
1278   PrepareOwnerBlocked( ctx );
1279 
1280   /*
1281    * Yield while the owner thread of the highest priority ready node is
1282    * blocked.
1283    */
1284   T_scheduler_set_event_handler( YieldSuspendA, ctx );
1285   Yield();
1286 
1287   /*
1288    * Clean up all used resources.
1289    */
1290   CleanupOwnerBlocked( ctx );
1291 }
1292 
1293 /**
1294  * @brief Construct a system state in which a scheduler tries to schedule a
1295  *   node those owner thread is blocked during a yield operation while a sticky
1296  *   node is involved.
1297  */
1298 static void ScoreSchedSmpValSmp_Action_16( ScoreSchedSmpValSmp_Context *ctx )
1299 {
1300   PrepareOwnerBlocked( ctx );
1301 
1302   /*
1303    * Yield while the owner thread of the highest priority ready node is
1304    * blocked.
1305    */
1306   MakeSticky( ctx );
1307   T_scheduler_set_event_handler( YieldSuspendA, ctx );
1308   Yield();
1309   CleanSticky( ctx );
1310 
1311   /*
1312    * Clean up all used resources.
1313    */
1314   CleanupOwnerBlocked( ctx );
1315 }
1316 
1317 /**
1318  * @brief Create three worker threads and a mutex.  Use the mutex and the
1319  *   worker to check that a not scheduled thread does not get removed from the
1320  *   set of ready threads of a scheduler when a help request is reconsidered.
1321  */
1322 static void ScoreSchedSmpValSmp_Action_17( ScoreSchedSmpValSmp_Context *ctx )
1323 {
1324   Thread_Control *worker_b;
1325 
1326   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
1327   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
1328   SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
1329   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
1330   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_LOW );
1331   MakeBusy( ctx, WORKER_B );
1332   WaitForBusy( ctx, WORKER_B );
1333   MakeBusy( ctx, WORKER_C );
1334   WaitForBusy( ctx, WORKER_C );
1335 
1336   /*
1337    * Prevent that worker B can perform a post-switch cleanup.
1338    */
1339   worker_b = GetThread( ctx->worker_id[ WORKER_B ] );
1340   _Thread_State_acquire( worker_b, &ctx->lock_context );
1341   _ISR_lock_ISR_enable( &ctx->lock_context );
1342 
1343   /*
1344    * Give worker C a lower priority than worker B.  Worker B will try to finish
1345    * the thread dispatch by doing a post-switch cleanup.  The post-switch
1346    * cleanup cannot progress since the runner owns the thread state lock.  Wait
1347    * until the other processor waits on the thread state lock of worker B.
1348    */
1349   SetPriority( ctx->worker_id[ WORKER_C ], PRIO_LOW );
1350   TicketLockWaitForOthers( &worker_b->Join_queue.Queue.Lock, 1 );
1351 
1352   /*
1353    * Give worker C a higher priority than worker B.  Let worker B do its
1354    * post-switch cleanup which will carry out the reconsider help requests for
1355    * a not scheduled thread.
1356    */
1357   ctx->counter = 0;
1358   T_scheduler_set_event_handler( ReleaseThreadLockB, ctx );
1359   SetPriority( ctx->worker_id[ WORKER_C ], PRIO_HIGH );
1360   T_scheduler_set_event_handler( NULL, NULL );
1361   T_eq_u32( ctx->counter, 4 );
1362 
1363   /*
1364    * Clean up all used resources.
1365    */
1366   StopBusy( ctx, WORKER_B );
1367   StopBusy( ctx, WORKER_C );
1368   SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
1369   SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
1370   SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
1371   SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
1372   SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
1373 }
1374 
1375 /**
1376  * @fn void T_case_body_ScoreSchedSmpValSmp( void )
1377  */
1378 T_TEST_CASE_FIXTURE( ScoreSchedSmpValSmp, &ScoreSchedSmpValSmp_Fixture )
1379 {
1380   ScoreSchedSmpValSmp_Context *ctx;
1381 
1382   ctx = T_fixture_context();
1383 
1384   ScoreSchedSmpValSmp_Action_0( ctx );
1385   ScoreSchedSmpValSmp_Action_1( ctx );
1386   ScoreSchedSmpValSmp_Action_2( ctx );
1387   ScoreSchedSmpValSmp_Action_3( ctx );
1388   ScoreSchedSmpValSmp_Action_4( ctx );
1389   ScoreSchedSmpValSmp_Action_5( ctx );
1390   ScoreSchedSmpValSmp_Action_6( ctx );
1391   ScoreSchedSmpValSmp_Action_7( ctx );
1392   ScoreSchedSmpValSmp_Action_8( ctx );
1393   ScoreSchedSmpValSmp_Action_9( ctx );
1394   ScoreSchedSmpValSmp_Action_10( ctx );
1395   ScoreSchedSmpValSmp_Action_11( ctx );
1396   ScoreSchedSmpValSmp_Action_12( ctx );
1397   ScoreSchedSmpValSmp_Action_13( ctx );
1398   ScoreSchedSmpValSmp_Action_14( ctx );
1399   ScoreSchedSmpValSmp_Action_15( ctx );
1400   ScoreSchedSmpValSmp_Action_16( ctx );
1401   ScoreSchedSmpValSmp_Action_17( ctx );
1402 }
1403 
1404 /** @} */