/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup ScoreTqValSmp
 */

/*
 * Copyright (C) 2021 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is part of the RTEMS quality process and was automatically
 * generated.  If you find something that needs to be fixed or
 * worded better please post a report or patch to an RTEMS mailing list
 * or raise a bug report:
 *
 * https://www.rtems.org/bugs.html
 *
 * For information on updating and regenerating please refer to the How-To
 * section in the Software Requirements Engineering chapter of the
 * RTEMS Software Engineering manual.  The manual is provided as a part of
 * a release.  For development sources please refer to the online
 * documentation at:
 *
 * https://docs.rtems.org
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/smpbarrier.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

#include "tx-support.h"

#include <rtems/test.h>

/**
 * @defgroup ScoreTqValSmp spec:/score/tq/val/smp
 *
 * @ingroup TestsuitesValidationSmpOnly0
 *
 * @brief Tests SMP-specific thread queue behaviour.
 *
 * This test case performs the following actions:
 *
 * - Create two or three worker threads and a mutex.  Use the mutex and the
 *   workers to do a thread priority change in parallel with a thread queue
 *   extraction.
 *
 *   - Create a mutex and let the runner obtain it.
 *
 *   - Create and start worker A on a second processor.  Let it wait on the
 *     barrier.
 *
 *   - If there are more than two processors, then also create and start
 *     worker C.  Let it wait on the barrier.
 *
 *   - Create and start worker B.  Let it try to obtain the mutex which is
 *     owned by the runner.  Delete worker B to extract it from the thread
 *     queue.  Wrap the thread queue extract operation to do a parallel
 *     thread priority change carried out by worker A (and maybe C).
 *
 *   - Clean up all used resources.
 *
 * - Build a cyclic dependency graph using several worker threads and mutexes.
 *   Use the mutexes and the workers to construct a thread queue deadlock
 *   which is detected on one processor while it uses thread queue links
 *   inserted by another processor.  The runner thread controls the test
 *   scenario via the two thread queue locks.  This is an important test
 *   scenario which shows why the thread queue implementation is a bit more
 *   complicated in SMP configurations.
 *
 *   - Let worker D wait for mutex A.  Let worker C wait for mutex D.  Let
 *     worker B wait for mutex C.
 *
 *   - Let worker A attempt to obtain mutex B.  Let worker A wait on the lock
 *     of mutex C.  Worker A will insert two thread queue links.
 *
 *   - Let worker E try to obtain mutex D.  Worker E will add a thread queue
 *     link which is later used by worker A to detect the deadlock.
 *
 *   - Let worker A continue the obtain sequence.  It will detect a deadlock.
 *
 *   - Clean up all used resources.
 *
 * @{
 */

/**
 * @brief Test context for spec:/score/tq/val/smp test case.
 */
typedef struct {
  /**
   * @brief This member contains the runner identifier.
   */
  rtems_id runner_id;

  /**
   * @brief This member contains the worker A identifier.
   */
  rtems_id worker_a_id;

  /**
   * @brief This member contains the worker B identifier.
   */
  rtems_id worker_b_id;

  /**
   * @brief This member contains the worker C identifier.
   */
  rtems_id worker_c_id;

  /**
   * @brief This member contains the worker D identifier.
   */
  rtems_id worker_d_id;

  /**
   * @brief This member contains the worker E identifier.
   */
  rtems_id worker_e_id;

  /**
   * @brief This member contains the mutex A identifier.
   */
  rtems_id mutex_a_id;

  /**
   * @brief This member contains the mutex B identifier.
   */
  rtems_id mutex_b_id;

  /**
   * @brief This member contains the mutex C identifier.
   */
  rtems_id mutex_c_id;

  /**
   * @brief This member contains the mutex D identifier.
   */
  rtems_id mutex_d_id;

  /**
   * @brief This member contains the count of processors used by the test.
   */
  uint32_t used_cpus;

  /**
   * @brief This member contains the thread queue of the mutex.
   */
  Thread_queue_Queue *thread_queue;

  /**
   * @brief This member contains the context to wrap the thread queue extract.
   */
  WrapThreadQueueContext wrap;

  /**
   * @brief This member contains the barrier to synchronize the runner and the
   *   workers.
   */
  SMP_barrier_Control barrier;

  /**
   * @brief This member contains the barrier state for the runner processor.
   */
  SMP_barrier_State barrier_state;
} ScoreTqValSmp_Context;

static ScoreTqValSmp_Context
  ScoreTqValSmp_Instance;

typedef ScoreTqValSmp_Context Context;

static void Extract( void *arg )
{
  Context *ctx;

  ctx = arg;

  /* PC0 */
  _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, ctx->used_cpus );

  /*
   * Ensure that worker A (and maybe C) acquired the thread wait lock of
   * worker B.
   */
  TicketLockWaitForOthers( &ctx->thread_queue->Lock, ctx->used_cpus - 1 );

  /*
   * Continue with the thread queue extraction.  The thread wait lock of
   * worker B will be changed back to the default thread wait lock.  This
   * will cause worker A (and maybe C) to release the thread queue lock and
   * acquire the default thread wait lock of worker B instead to carry out
   * the priority change.
   *
   * See also _Thread_Wait_acquire_critical().
   */
}

static void PriorityChangeWorker( rtems_task_argument arg )
{
  Context          *ctx;
  SMP_barrier_State state;

  ctx = (Context *) arg;
  _SMP_barrier_State_initialize( &state );

  /* PC0 */
  _SMP_barrier_Wait( &ctx->barrier, &state, ctx->used_cpus );

  SetPriority( ctx->worker_b_id, PRIO_VERY_HIGH );

  /* PC1 */
  _SMP_barrier_Wait( &ctx->barrier, &state, ctx->used_cpus );

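  /*
   * Block forever.  The runner waits for the execution stop of this worker
   * and then deletes it.
   */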
  (void) ReceiveAnyEvents();
}

static void MutexObtainWorker( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  ObtainMutex( ctx->mutex_a_id );
}

static void DeadlockWorkerA( rtems_task_argument arg )
{
  Context          *ctx;
  SMP_barrier_State state;

  ctx = (Context *) arg;
  _SMP_barrier_State_initialize( &state );

  ObtainMutex( ctx->mutex_a_id );

  /* D0 */
  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

  /* D1 */
  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

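  /*
   * This request closes the cycle: worker A waits for mutex B (owned by
   * worker B), worker B waits for mutex C (owned by worker C), worker C
   * waits for mutex D (owned by worker D), and worker D waits for mutex A
   * (owned by this worker).  The deadlock detection is expected to reject
   * this request.
   */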
  ObtainMutexDeadlock( ctx->mutex_b_id );

  ReleaseMutex( ctx->mutex_a_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_0 );
  (void) ReceiveAnyEvents();
}

static void DeadlockWorkerB( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  ObtainMutex( ctx->mutex_b_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_5 );
  ObtainMutex( ctx->mutex_c_id );
  ReleaseMutex( ctx->mutex_c_id );
  ReleaseMutex( ctx->mutex_b_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_1 );
  (void) ReceiveAnyEvents();
}

static void DeadlockWorkerC( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  ObtainMutex( ctx->mutex_c_id );
  ObtainMutex( ctx->mutex_d_id );
  ReleaseMutex( ctx->mutex_d_id );
  ReleaseMutex( ctx->mutex_c_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_2 );
  (void) ReceiveAnyEvents();
}

static void DeadlockWorkerD( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  ObtainMutex( ctx->mutex_d_id );
  ObtainMutex( ctx->mutex_a_id );
  ReleaseMutex( ctx->mutex_a_id );
  ReleaseMutex( ctx->mutex_d_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_3 );
  (void) ReceiveAnyEvents();
}

static void DeadlockWorkerE( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

  ObtainMutex( ctx->mutex_d_id );
  ReleaseMutex( ctx->mutex_d_id );
  SendEvents( ctx->runner_id, RTEMS_EVENT_4 );
  (void) ReceiveAnyEvents();
}

static void ScoreTqValSmp_Setup( ScoreTqValSmp_Context *ctx )
{
  SetSelfPriority( PRIO_NORMAL );
}

static void ScoreTqValSmp_Setup_Wrap( void *arg )
{
  ScoreTqValSmp_Context *ctx;

  ctx = arg;
  ScoreTqValSmp_Setup( ctx );
}

static void ScoreTqValSmp_Teardown( ScoreTqValSmp_Context *ctx )
{
  RestoreRunnerPriority();
}

static void ScoreTqValSmp_Teardown_Wrap( void *arg )
{
  ScoreTqValSmp_Context *ctx;

  ctx = arg;
  ScoreTqValSmp_Teardown( ctx );
}

static T_fixture ScoreTqValSmp_Fixture = {
  .setup = ScoreTqValSmp_Setup_Wrap,
  .stop = NULL,
  .teardown = ScoreTqValSmp_Teardown_Wrap,
  .scope = NULL,
  .initial_context = &ScoreTqValSmp_Instance
};

/**
 * @brief Create two or three worker threads and a mutex.  Use the mutex and
 *   the workers to do a thread priority change in parallel with a thread
 *   queue extraction.
 */
static void ScoreTqValSmp_Action_0( ScoreTqValSmp_Context *ctx )
{
  _SMP_barrier_Control_initialize( &ctx->barrier );
  _SMP_barrier_State_initialize( &ctx->barrier_state );
  WrapThreadQueueInitialize( &ctx->wrap, Extract, ctx );
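  /*
   * Note: the wrapped extract operation invokes Extract() before it carries
   * out the actual thread queue extraction, see the use of
   * WrapThreadQueueExtractDirect() below.
   */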

  if ( rtems_scheduler_get_processor_maximum() > 2 ) {
    ctx->used_cpus = 3;
  } else {
    ctx->used_cpus = 2;
  }

  /*
   * Create a mutex and let the runner obtain it.
   */
  ctx->mutex_a_id = CreateMutexNoProtocol();
  ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_a_id );
  ObtainMutex( ctx->mutex_a_id );

  /*
   * Create and start worker A on a second processor.  Let it wait on the
   * barrier.
   */
  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
  StartTask( ctx->worker_a_id, PriorityChangeWorker, ctx );

  /*
   * If there are more than two processors, then also create and start worker
   * C.  Let it wait on the barrier.
   */
  if ( ctx->used_cpus > 2 ) {
    ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
    SetScheduler( ctx->worker_c_id, SCHEDULER_C_ID, PRIO_NORMAL );
    StartTask( ctx->worker_c_id, PriorityChangeWorker, ctx );
  }

  /*
   * Create and start worker B.  Let it try to obtain the mutex which is owned
   * by the runner.  Delete worker B to extract it from the thread queue.
   * Wrap the thread queue extract operation to do a parallel thread priority
   * change carried out by worker A (and maybe C).
   */
  ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
  StartTask( ctx->worker_b_id, MutexObtainWorker, ctx );
  WrapThreadQueueExtractDirect( &ctx->wrap, GetThread( ctx->worker_b_id ) );
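  /*
   * Deleting worker B extracts it from the thread queue of the mutex.  The
   * wrapped extract operation runs Extract(), which synchronizes with the
   * priority change workers through the PC0 barrier.
   */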
  DeleteTask( ctx->worker_b_id );

  /*
   * Clean up all used resources.
   */
  /* PC1 */
  _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, ctx->used_cpus );

  WaitForExecutionStop( ctx->worker_a_id );
  DeleteTask( ctx->worker_a_id );

  if ( ctx->used_cpus > 2 ) {
    WaitForExecutionStop( ctx->worker_c_id );
    DeleteTask( ctx->worker_c_id );
  }

  ReleaseMutex( ctx->mutex_a_id );
  DeleteMutex( ctx->mutex_a_id );
  WrapThreadQueueDestroy( &ctx->wrap );
}

/**
 * @brief Build a cyclic dependency graph using several worker threads and
 *   mutexes.  Use the mutexes and the workers to construct a thread queue
 *   deadlock which is detected on one processor while it uses thread queue
 *   links inserted by another processor.  The runner thread controls the
 *   test scenario via the two thread queue locks.  This is an important test
 *   scenario which shows why the thread queue implementation is a bit more
 *   complicated in SMP configurations.
 */
static void ScoreTqValSmp_Action_1( ScoreTqValSmp_Context *ctx )
{
  Thread_queue_Queue *queue_b;
  Thread_queue_Queue *queue_c;
  ISR_lock_Context    lock_context;
  SMP_barrier_State   state;

  if ( rtems_scheduler_get_processor_maximum() <= 2 ) {
    /*
     * We can only run this validation test on systems with three or more
     * processors.  The sequence under test can happen on systems with only
     * two processors; however, we need a third processor to control the
     * other two processors via ISR locks to get a deterministic test
     * scenario.
     */
    return;
  }

  ctx->runner_id = rtems_task_self();

  _SMP_barrier_Control_initialize( &ctx->barrier );
  _SMP_barrier_State_initialize( &state );

  ctx->mutex_a_id = CreateMutexNoProtocol();
  ctx->mutex_b_id = CreateMutexNoProtocol();
  ctx->mutex_c_id = CreateMutexNoProtocol();
  ctx->mutex_d_id = CreateMutexNoProtocol();

  queue_b = GetMutexThreadQueue( ctx->mutex_b_id );
  queue_c = GetMutexThreadQueue( ctx->mutex_c_id );

  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
  ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
  ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
  ctx->worker_d_id = CreateTask( "WRKD", PRIO_NORMAL );
  ctx->worker_e_id = CreateTask( "WRKE", PRIO_NORMAL );

  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
  SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_HIGH );
  SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_HIGH );
  SetScheduler( ctx->worker_d_id, SCHEDULER_B_ID, PRIO_HIGH );
  SetScheduler( ctx->worker_e_id, SCHEDULER_C_ID, PRIO_NORMAL );

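  /*
   * For orientation, the scenario builds this cyclic wait-for graph:
   *
   *   worker A waits for mutex B (owned by worker B),
   *   worker B waits for mutex C (owned by worker C),
   *   worker C waits for mutex D (owned by worker D),
   *   worker D waits for mutex A (owned by worker A).
   */
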
  /*
   * Let worker D wait for mutex A.  Let worker C wait for mutex D.  Let
   * worker B wait for mutex C.
   */
  StartTask( ctx->worker_a_id, DeadlockWorkerA, ctx );

  /* D0 */
  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

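  /* Worker A now owns mutex A, so worker D will block on it below */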
  StartTask( ctx->worker_d_id, DeadlockWorkerD, ctx );
  StartTask( ctx->worker_c_id, DeadlockWorkerC, ctx );
  StartTask( ctx->worker_b_id, DeadlockWorkerB, ctx );
  ReceiveAllEvents( RTEMS_EVENT_5 );
  WaitForExecutionStop( ctx->worker_b_id );

  /*
   * Let worker A attempt to obtain mutex B.  Let worker A wait on the lock of
   * mutex C.  Worker A will insert two thread queue links.
   */
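  /*
   * The runner acquires the thread queue lock of mutex C.  While worker A
   * carries out the deadlock detection for its mutex B obtain request, it
   * follows the wait-for chain to mutex C and has to wait on this lock,
   * which keeps the intermediate state stable for the test.
   */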
  _ISR_lock_ISR_disable( &lock_context );
  _Thread_queue_Queue_acquire_critical(
    queue_c,
    &_Thread_Executing->Potpourri_stats,
    &lock_context
  );
  _ISR_lock_ISR_enable( &lock_context );

  /* D1 */
  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

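  /*
   * Wait until worker A is the only other waiter on the thread queue lock of
   * mutex C, which shows that its deadlock detection reached mutex C.
   */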
  TicketLockWaitForOthers( &queue_c->Lock, 1 );

  /*
   * Let worker E try to obtain mutex D.  Worker E will add a thread queue
   * link which is later used by worker A to detect the deadlock.
   */
  StartTask( ctx->worker_e_id, DeadlockWorkerE, ctx );
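  /*
   * Waiting for one other party on the lock of queue B shows that worker E
   * reached mutex B along the dependency chain and inserted the thread queue
   * link which worker A later uses to detect the deadlock.
   */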
  TicketLockWaitForOthers( &queue_b->Lock, 1 );

  /*
   * Let worker A continue the obtain sequence.  It will detect a deadlock.
   */
  _ISR_lock_ISR_disable( &lock_context );
  _Thread_queue_Queue_release( queue_c, &lock_context );

  /*
   * Clean up all used resources.
   */
  ReceiveAllEvents(
    RTEMS_EVENT_0 | RTEMS_EVENT_1 | RTEMS_EVENT_2 | RTEMS_EVENT_3 |
    RTEMS_EVENT_4
  );
  WaitForExecutionStop( ctx->worker_a_id );
  WaitForExecutionStop( ctx->worker_b_id );
  WaitForExecutionStop( ctx->worker_c_id );
  WaitForExecutionStop( ctx->worker_d_id );
  WaitForExecutionStop( ctx->worker_e_id );
  DeleteTask( ctx->worker_a_id );
  DeleteTask( ctx->worker_b_id );
  DeleteTask( ctx->worker_c_id );
  DeleteTask( ctx->worker_d_id );
  DeleteTask( ctx->worker_e_id );
  DeleteMutex( ctx->mutex_a_id );
  DeleteMutex( ctx->mutex_b_id );
  DeleteMutex( ctx->mutex_c_id );
  DeleteMutex( ctx->mutex_d_id );
}

/**
 * @fn void T_case_body_ScoreTqValSmp( void )
 */
T_TEST_CASE_FIXTURE( ScoreTqValSmp, &ScoreTqValSmp_Fixture )
{
  ScoreTqValSmp_Context *ctx;

  ctx = T_fixture_context();

  ScoreTqValSmp_Action_0( ctx );
  ScoreTqValSmp_Action_1( ctx );
}

/** @} */