/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup ScoreThreadValSmp
 */

/*
 * Copyright (C) 2021 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is part of the RTEMS quality process and was automatically
 * generated.  If you find something that needs to be fixed or
 * worded better please post a report or patch to an RTEMS mailing list
 * or raise a bug report:
 *
 * https://www.rtems.org/bugs.html
 *
 * For information on updating and regenerating please refer to the How-To
 * section in the Software Requirements Engineering chapter of the
 * RTEMS Software Engineering manual.  The manual is provided as a part of
 * a release.  For development sources please refer to the online
 * documentation at:
 *
 * https://docs.rtems.org
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems.h>
#include <rtems/test-scheduler.h>
#include <rtems/score/smpbarrier.h>
#include <rtems/score/threadimpl.h>

#include "ts-config.h"
#include "tx-support.h"

#include <rtems/test.h>

/**
 * @defgroup ScoreThreadValSmp spec:/score/thread/val/smp
 *
 * @ingroup TestsuitesValidationSmpOnly0
 *
 * @brief Tests SMP-specific thread behaviour.
 *
 * This test case performs the following actions:
 *
 * - Create three worker threads and a mutex.  Use the mutex and the worker to
 *   move to a helping scheduler.
 *
 *   - Pin the runner thread while it executes on a processor owned by a
 *     helping scheduler.
 *
 *   - Pin and unpin the runner thread.  This is a nested operation.
 *
 *   - Preempt the pinned runner thread.  Workers B and C execute at the
 *     same time on processors 0 and 1, respectively, for some period of
 *     time.  This shows that the pinning of the runner thread is maintained.
 *
 *   - Unpin the runner thread.  The runner moves back to its home scheduler.
 *
 *   - Release the mutex.
 *
 *   - Pin the runner thread.  Unpin the runner thread while it is suspended.
 *
 *   - Make sure the worker released the mutex.
 *
 *   - Clean up all used resources.
 *
 * - Create three worker threads and a mutex.  Use the mutex and the worker to
 *   check that a suspended thread does not reconsider help requests.
 *
 *   - Let worker B help worker A through the mutex.  Preempt worker A.  Delay
 *     the thread switch to worker A.
 *
 *   - Suspend worker A and let it wait on its thread state lock.  Check that
 *     worker A did not reconsider help requests.
 *
 *   - Resume worker A.  Check that worker A did reconsider help requests after
 *     the thread dispatch.
 *
 *   - Clean up all used resources.
 *
 * - Create three worker threads and two mutexes.  Provoke an explicit thread
 *   priority change while a priority inheritance change is in progress.  The
 *   explicit thread priority change propagates through priority inheritance.
 *
 *   - Create the following dependencies MB -> WA and TC -> MA -> WA.
 *
 *   - Acquire the worker A default thread wait lock.  Start creating the
 *     dependency TB -> MB (we already have MB -> WA).  Make sure it stops
 *     while acquiring the worker A default thread wait lock.  Prepare the
 *     worker A default thread wait lock release.  Raise the worker C priority.
 *     This operation will call the wrapped _Thread_queue_Path_acquire() and
 *     trigger the prepared release of the worker A default thread wait lock.
 *     The worker A default wait lock critical sections will now execute in
 *     the prepared sequence.
 *
 *   - Clean up all used resources.
 *
 * @{
 */

/**
 * @brief Test context for spec:/score/thread/val/smp test case.
 */
typedef struct {
  /**
   * @brief This member contains the worker A identifier.
   */
  rtems_id worker_a_id;

  /**
   * @brief This member contains the worker B identifier.
   */
  rtems_id worker_b_id;

  /**
   * @brief This member contains the worker C identifier.
   */
  rtems_id worker_c_id;

  /**
   * @brief This member contains the mutex A identifier.
   */
  rtems_id mutex_a_id;

  /**
   * @brief This member contains the mutex B identifier.
   */
  rtems_id mutex_b_id;

  /**
   * @brief If this member is true, then the worker shall busy wait.
   */
  volatile bool busy;

  /**
   * @brief This member contains a counter for EVENT_COUNT.
   */
  volatile uint32_t counter;

  /**
   * @brief This member contains the barrier to synchronize the runner and the
   *   workers.
   */
  SMP_barrier_Control barrier;

  /**
   * @brief This member contains the barrier state for the runner processor.
   */
  SMP_barrier_State barrier_state;

  /**
   * @brief This member references the processor on which the worker A wait
   *   default lock was acquired.
   */
  Per_CPU_Control *worker_a_wait_default_lock_cpu;

  /**
   * @brief This member contains the lock context for the worker A wait default
   *   lock acquire and release.
   */
  ISR_lock_Context worker_a_wait_default_lock_context;
} ScoreThreadValSmp_Context;

static ScoreThreadValSmp_Context
  ScoreThreadValSmp_Instance;

#define EVENT_A_OBTAIN RTEMS_EVENT_0

#define EVENT_A_RELEASE RTEMS_EVENT_1

#define EVENT_B_OBTAIN RTEMS_EVENT_2

#define EVENT_B_RELEASE RTEMS_EVENT_3

#define EVENT_COUNT_EARLY RTEMS_EVENT_4

#define EVENT_BUSY RTEMS_EVENT_5

#define EVENT_COUNT RTEMS_EVENT_6

#define EVENT_LET_WORKER_C_COUNT RTEMS_EVENT_7

#define EVENT_SET_TASK_SWITCH_EXTENSION RTEMS_EVENT_8

typedef ScoreThreadValSmp_Context Context;

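/*
 * When this pointer is set, the wrapped _Thread_queue_Path_acquire() below
 * releases the worker A default thread wait lock on the processor which
 * acquired it.
 */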
static Context *release_worker_a_wait_default;

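/*
 * If worker A is the heir of the processor, this task switch extension delays
 * the thread switch to worker A by waiting at the barriers B0 and B1.
 */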
static void TaskSwitchExtension( rtems_tcb *executing, rtems_tcb *heir )
{
  Context        *ctx;
  Thread_Control *thread;

  (void) executing;
  (void) heir;

  ctx = T_fixture_context();
  thread = GetThread( ctx->worker_a_id );

  if ( thread == heir ) {
    SMP_barrier_State state;

    _SMP_barrier_State_initialize( &state );

    /* B0 */
    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

    /* B1 */
    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
  }
}

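/*
 * The worker tasks are driven by events sent from the runner.  Each event bit
 * selects one operation, for example obtaining or releasing a mutex, busy
 * waiting, or incrementing the counter.
 */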
static void WorkerTask( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  while ( true ) {
    rtems_event_set events;

    events = ReceiveAnyEvents();

    if ( ( events & EVENT_A_OBTAIN ) != 0 ) {
      ObtainMutex( ctx->mutex_a_id );
    }

    if ( ( events & EVENT_A_RELEASE ) != 0 ) {
      ReleaseMutex( ctx->mutex_a_id );
    }

    if ( ( events & EVENT_B_OBTAIN ) != 0 ) {
      ObtainMutex( ctx->mutex_b_id );
    }

    if ( ( events & EVENT_B_RELEASE ) != 0 ) {
      ReleaseMutex( ctx->mutex_b_id );
    }

    if ( ( events & EVENT_COUNT_EARLY ) != 0 ) {
      ++ctx->counter;
    }

    if ( ( events & EVENT_BUSY ) != 0 ) {
      while ( ctx->busy ) {
        /* Do nothing */
      }
    }

    if ( ( events & EVENT_COUNT ) != 0 ) {
      ++ctx->counter;
    }

    if ( ( events & EVENT_LET_WORKER_C_COUNT ) != 0 ) {
      uint32_t counter;

      counter = ctx->counter;
      SendEvents( ctx->worker_c_id, EVENT_COUNT );

      while ( ctx->counter == counter ) {
        /* Wait */
      }
    }

    if ( ( events & EVENT_SET_TASK_SWITCH_EXTENSION ) != 0 ) {
      SetTaskSwitchExtension( TaskSwitchExtension );
    }
  }
}

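/*
 * This scheduler event handler runs before the scheduler block operation
 * caused by suspending worker A.  It waits at barrier B1 to release the
 * delayed task switch extension and then waits until there is a waiter on the
 * thread state lock of worker A.
 */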
static void SchedulerBlock(
  void                    *arg,
  const T_scheduler_event *event,
  T_scheduler_when         when
)
{
  Context *ctx;

  ctx = arg;

  if (
    when == T_SCHEDULER_BEFORE &&
    event->operation == T_SCHEDULER_BLOCK
  ) {
    Thread_Control *thread;

    T_scheduler_set_event_handler( NULL, NULL );

    /* B1 */
    _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );

    thread = GetThread( ctx->worker_a_id );
    TicketLockWaitForOthers( &thread->Join_queue.Queue.Lock, 1 );
  }
}

static void Suspend( void *arg )
{
  Thread_Control *thread;

  thread = arg;
  SuspendTask( thread->Object.id );
}

static void Resume( void *arg )
{
  Thread_Control *thread;

  thread = arg;
  ResumeTask( thread->Object.id );
}

static void WaitForCounter( const Context *ctx, uint32_t expected )
{
  while ( ctx->counter != expected ) {
    /* Wait */
  }
}

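/*
 * Send an event which lets the worker carry out a mutex operation and wait
 * until the thread queue lock of the mutex was released once by the worker.
 */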
static void DoMutexOperation(
  rtems_id        worker_id,
  rtems_id        mutex_id,
  rtems_event_set event
)
{
  Thread_queue_Queue *queue;
  TicketLockState     lock_state;

  queue = GetMutexThreadQueue( mutex_id );
  TicketLockGetState( &queue->Lock, &lock_state );
  SendEvents( worker_id, event );
  TicketLockWaitForReleases( &lock_state, 1 );
}

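/*
 * The test wraps _Thread_queue_Path_acquire(), presumably through the GNU ld
 * option --wrap=_Thread_queue_Path_acquire, so that the prepared release of
 * the worker A default thread wait lock can be triggered right before the
 * real path acquire executes.
 */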
Thread_queue_Deadlock_status __wrap__Thread_queue_Path_acquire(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
);

Thread_queue_Deadlock_status __real__Thread_queue_Path_acquire(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
);

Thread_queue_Deadlock_status __wrap__Thread_queue_Path_acquire(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Context *ctx;

  ctx = release_worker_a_wait_default;

  if (
    ctx != NULL &&
    ctx->worker_a_wait_default_lock_cpu == _Per_CPU_Get()
  ) {
    Thread_Control *worker_a;

    release_worker_a_wait_default = NULL;
    worker_a = GetThread( ctx->worker_a_id );
    _Thread_Wait_release_default_critical(
      worker_a,
      &ctx->worker_a_wait_default_lock_context
    );
  }

  return __real__Thread_queue_Path_acquire(
    queue,
    the_thread,
    queue_context
  );
}

static void ScoreThreadValSmp_Setup( ScoreThreadValSmp_Context *ctx )
{
  SetSelfPriority( PRIO_NORMAL );
}

static void ScoreThreadValSmp_Setup_Wrap( void *arg )
{
  ScoreThreadValSmp_Context *ctx;

  ctx = arg;
  ScoreThreadValSmp_Setup( ctx );
}

static void ScoreThreadValSmp_Teardown( ScoreThreadValSmp_Context *ctx )
{
  RestoreRunnerPriority();
}

static void ScoreThreadValSmp_Teardown_Wrap( void *arg )
{
  ScoreThreadValSmp_Context *ctx;

  ctx = arg;
  ScoreThreadValSmp_Teardown( ctx );
}

static T_fixture ScoreThreadValSmp_Fixture = {
  .setup = ScoreThreadValSmp_Setup_Wrap,
  .stop = NULL,
  .teardown = ScoreThreadValSmp_Teardown_Wrap,
  .scope = NULL,
  .initial_context = &ScoreThreadValSmp_Instance
};

/**
 * @brief Create three worker threads and a mutex.  Use the mutex and the
 *   worker to move to a helping scheduler.
 */
static void ScoreThreadValSmp_Action_0( ScoreThreadValSmp_Context *ctx )
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;

  executing = _Thread_Get_executing();
  ctx->counter = 0;

  ctx->mutex_a_id = CreateMutex();

  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
  StartTask( ctx->worker_a_id, WorkerTask, ctx );

  ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
  StartTask( ctx->worker_b_id, WorkerTask, ctx );

  ctx->worker_c_id = CreateTask( "WRKC", PRIO_LOW );
  StartTask( ctx->worker_c_id, WorkerTask, ctx );

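  /*
   * The runner owns mutex A, so worker A of scheduler B blocks on it and
   * offers help to the runner.  Worker B busy waits with a high priority and
   * preempts the runner on its home scheduler, thus the runner moves to
   * processor 1 which is owned by the helping scheduler.
   */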
  ObtainMutex( ctx->mutex_a_id );
  SendEvents( ctx->worker_a_id, EVENT_A_OBTAIN | EVENT_A_RELEASE );

  ctx->busy = true;
  SendEvents( ctx->worker_b_id, EVENT_BUSY );

  /*
   * Pin the runner thread while it executes on a processor owned by a helping
   * scheduler.
   */
  T_eq_u32( rtems_scheduler_get_processor(), 1 );
  _Thread_Pin( executing );

  /*
   * Pin and unpin the runner thread.  This is a nested operation.
   */
  T_eq_u32( rtems_scheduler_get_processor(), 1 );
  _Thread_Pin( executing );
  _Thread_Unpin( executing, _Per_CPU_Get_snapshot() );

  /*
   * Preempt the pinned runner thread.  Workers B and C execute at the same
   * time on processors 0 and 1, respectively, for some period of time.  This
   * shows that the pinning of the runner thread is maintained.
   */
  ctx->busy = false;
  SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_HIGH );
  SendEvents( ctx->worker_b_id, EVENT_LET_WORKER_C_COUNT );

  T_eq_u32( rtems_scheduler_get_processor(), 1 );
  T_eq_u32( ctx->counter, 1 );

  /*
   * Unpin the runner thread.  The runner moves back to its home scheduler.
   */
  cpu_self = _Thread_Dispatch_disable();
  _Thread_Unpin( executing, cpu_self );
  _Thread_Dispatch_direct( cpu_self );

  T_eq_u32( rtems_scheduler_get_processor(), 0 );

  /*
   * Release the mutex.
   */
  ReleaseMutex( ctx->mutex_a_id );
  T_eq_u32( rtems_scheduler_get_processor(), 0 );

  /*
   * Pin the runner thread.  Unpin the runner thread while it is suspended.
   */
  _Thread_Pin( executing );

  /* We have to preempt the runner to end up in _Thread_Do_unpin() */
  SetPriority( ctx->worker_c_id, PRIO_HIGH );
  SendEvents( ctx->worker_c_id, EVENT_COUNT );
  T_eq_u32( ctx->counter, 2 );

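  /*
   * Perform the unpin while the runner is suspended.  The suspend and resume
   * are carried out from interrupt context while thread dispatching is
   * disabled.
   */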
  cpu_self = _Thread_Dispatch_disable();
  CallWithinISR( Suspend, executing );
  _Thread_Unpin( executing, cpu_self );
  CallWithinISR( Resume, executing );
  _Thread_Dispatch_direct( cpu_self );

  /*
   * Make sure the worker released the mutex.
   */
  SetSelfScheduler( SCHEDULER_B_ID, PRIO_LOW );
  SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );

  /*
   * Clean up all used resources.
   */
  DeleteTask( ctx->worker_a_id );
  DeleteTask( ctx->worker_b_id );
  DeleteTask( ctx->worker_c_id );
  DeleteMutex( ctx->mutex_a_id );
}

/**
 * @brief Create three worker threads and a mutex.  Use the mutex and the
 *   worker to check that a suspended thread does not reconsider help requests.
 */
static void ScoreThreadValSmp_Action_1( ScoreThreadValSmp_Context *ctx )
{
  T_scheduler_log_10       scheduler_log;
  size_t                   index;
  const T_scheduler_event *event;

  _SMP_barrier_Control_initialize( &ctx->barrier );
  _SMP_barrier_State_initialize( &ctx->barrier_state );

  ctx->counter = 0;
  ctx->mutex_a_id = CreateMutex();

  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
  StartTask( ctx->worker_a_id, WorkerTask, ctx );

  ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
  StartTask( ctx->worker_b_id, WorkerTask, ctx );

  ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
  SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_HIGH );
  StartTask( ctx->worker_c_id, WorkerTask, ctx );

  /*
   * Let worker B help worker A through the mutex.  Preempt worker A.  Delay
   * the thread switch to worker A.
   */
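  /*
   * Worker A obtains mutex A, increments the counter, and then busy waits.
   * Waiting for the counter ensures that worker A owns the mutex before
   * worker B tries to obtain it.
   */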
  ctx->busy = true;
  SendEvents(
    ctx->worker_a_id,
    EVENT_A_OBTAIN | EVENT_COUNT_EARLY | EVENT_BUSY | EVENT_COUNT
  );
  WaitForCounter( ctx, 1 );

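  /*
   * Worker B blocks on mutex A, which is owned by worker A, and is then
   * lowered in priority.  Worker C installs the task switch extension which
   * delays the thread switch to worker A.
   */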
  SendEvents( ctx->worker_b_id, EVENT_A_OBTAIN );
  SetPriority( ctx->worker_b_id, PRIO_LOW );
  SendEvents( ctx->worker_c_id, EVENT_SET_TASK_SWITCH_EXTENSION );

  /* B0 */
  _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );

  /*
   * Suspend worker A and let it wait on its thread state lock.  Check that
   * worker A did not reconsider help requests.
   */
  T_scheduler_record_10( &scheduler_log );
  T_scheduler_set_event_handler( SchedulerBlock, ctx );
  SuspendTask( ctx->worker_a_id );
  WaitForExecutionStop( ctx->worker_a_id );
  T_scheduler_record( NULL );
  T_eq_sz( scheduler_log.header.recorded, 2 );
  index = 0;
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_BLOCK );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_WITHDRAW_NODE );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_ptr( event, &T_scheduler_event_null );
  SetTaskSwitchExtension( NULL );

  /*
   * Resume worker A.  Check that worker A did reconsider help requests after
   * the thread dispatch.
   */
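  /*
   * Record the scheduler operations while worker A is resumed.  Ending the
   * busy wait lets worker A run up to the next counter increment, and waiting
   * for its execution stop ensures that its thread dispatch completed before
   * recording stops.
   */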
  T_scheduler_record_10( &scheduler_log );
  ResumeTask( ctx->worker_a_id );
  ctx->busy = false;
  WaitForCounter( ctx, 2 );
  WaitForExecutionStop( ctx->worker_a_id );
  T_scheduler_record( NULL );
  T_eq_sz( scheduler_log.header.recorded, 5 );
  index = 0;
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_RECONSIDER_HELP_REQUEST );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_RECONSIDER_HELP_REQUEST );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_BLOCK );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_int( event->operation, T_SCHEDULER_WITHDRAW_NODE );
  event = T_scheduler_next_any( &scheduler_log.header, &index );
  T_eq_ptr( event, &T_scheduler_event_null );

  /*
   * Clean up all used resources.
   */
  SendEvents( ctx->worker_a_id, EVENT_A_RELEASE | EVENT_COUNT );
  WaitForCounter( ctx, 3 );

  SetPriority( ctx->worker_b_id, PRIO_HIGH );
  SendEvents( ctx->worker_b_id, EVENT_A_RELEASE );

  DeleteTask( ctx->worker_a_id );
  DeleteTask( ctx->worker_b_id );
  DeleteTask( ctx->worker_c_id );
  DeleteMutex( ctx->mutex_a_id );
}

/**
 * @brief Create three worker threads and two mutexes.  Provoke an explicit
 *   thread priority change while a priority inheritance change is in progress.
 *   The explicit thread priority change propagates through priority
 *   inheritance.
 */
static void ScoreThreadValSmp_Action_2( ScoreThreadValSmp_Context *ctx )
{
  Thread_Control *worker_a;

  ctx->mutex_a_id = CreateMutex();
  ctx->mutex_b_id = CreateMutex();

  ctx->worker_a_id = CreateTask( "WRKA", PRIO_HIGH );
  StartTask( ctx->worker_a_id, WorkerTask, ctx );

  ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
  SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_NORMAL );
  StartTask( ctx->worker_b_id, WorkerTask, ctx );

  ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
  SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_NORMAL );
  StartTask( ctx->worker_c_id, WorkerTask, ctx );

  /*
   * Create the following dependencies MB -> WA and TC -> MA -> WA.
   */
  DoMutexOperation( ctx->worker_a_id, ctx->mutex_a_id, EVENT_A_OBTAIN );
  DoMutexOperation( ctx->worker_a_id, ctx->mutex_b_id, EVENT_B_OBTAIN );
  DoMutexOperation( ctx->worker_c_id, ctx->mutex_a_id, EVENT_A_OBTAIN );

  /*
   * Acquire the worker A default thread wait lock.  Start creating the
   * dependency TB -> MB (we already have MB -> WA).  Make sure it stops while
   * acquiring the worker A default thread wait lock.  Prepare the worker A
   * default thread wait lock release.  Raise the worker C priority.  This
   * operation will call the wrapped _Thread_queue_Path_acquire() and trigger
   * the prepared release of the worker A default thread wait lock.  The
   * worker A default wait lock critical sections will now execute in the
   * prepared sequence.
   */
  worker_a = GetThread( ctx->worker_a_id );
  _ISR_lock_ISR_disable( &ctx->worker_a_wait_default_lock_context );
  _Thread_Wait_acquire_default_critical(
    worker_a,
    &ctx->worker_a_wait_default_lock_context
  );
  ctx->worker_a_wait_default_lock_cpu = _Per_CPU_Get();
  _ISR_lock_ISR_enable( &ctx->worker_a_wait_default_lock_context );

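  /*
   * Worker B starts to obtain mutex B.  The resulting priority inheritance
   * update stops while acquiring the worker A default thread wait lock, which
   * is still held by the runner.
   */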
  SendEvents( ctx->worker_b_id, EVENT_B_OBTAIN );
  TicketLockWaitForOthers( &worker_a->Wait.Lock.Default.Lock.Ticket_lock, 1 );

  release_worker_a_wait_default = ctx;

  SetPriority( ctx->worker_c_id, PRIO_HIGH );

  /*
   * Clean up all used resources.
   */
  DoMutexOperation( ctx->worker_a_id, ctx->mutex_a_id, EVENT_A_RELEASE );
  DoMutexOperation( ctx->worker_a_id, ctx->mutex_b_id, EVENT_B_RELEASE );
  DoMutexOperation( ctx->worker_b_id, ctx->mutex_b_id, EVENT_B_RELEASE );
  DoMutexOperation( ctx->worker_c_id, ctx->mutex_a_id, EVENT_A_RELEASE );

  DeleteTask( ctx->worker_a_id );
  DeleteTask( ctx->worker_b_id );
  DeleteTask( ctx->worker_c_id );

  DeleteMutex( ctx->mutex_a_id );
  DeleteMutex( ctx->mutex_b_id );
}

/**
 * @fn void T_case_body_ScoreThreadValSmp( void )
 */
T_TEST_CASE_FIXTURE( ScoreThreadValSmp, &ScoreThreadValSmp_Fixture )
{
  ScoreThreadValSmp_Context *ctx;

  ctx = T_fixture_context();

  ScoreThreadValSmp_Action_0( ctx );
  ScoreThreadValSmp_Action_1( ctx );
  ScoreThreadValSmp_Action_2( ctx );
}

/** @} */