Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:52

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RtemsSemReqMrspObtain
0007  */
0008 
0009 /*
0010  * Copyright (C) 2021 embedded brains GmbH & Co. KG
0011  *
0012  * Redistribution and use in source and binary forms, with or without
0013  * modification, are permitted provided that the following conditions
0014  * are met:
0015  * 1. Redistributions of source code must retain the above copyright
0016  *    notice, this list of conditions and the following disclaimer.
0017  * 2. Redistributions in binary form must reproduce the above copyright
0018  *    notice, this list of conditions and the following disclaimer in the
0019  *    documentation and/or other materials provided with the distribution.
0020  *
0021  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0022  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0023  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0024  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0025  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0026  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0027  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0028  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0029  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0030  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0031  * POSSIBILITY OF SUCH DAMAGE.
0032  */
0033 
0034 /*
0035  * This file is part of the RTEMS quality process and was automatically
0036  * generated.  If you find something that needs to be fixed or
0037  * worded better please post a report or patch to an RTEMS mailing list
0038  * or raise a bug report:
0039  *
0040  * https://www.rtems.org/bugs.html
0041  *
0042  * For information on updating and regenerating please refer to the How-To
0043  * section in the Software Requirements Engineering chapter of the
0044  * RTEMS Software Engineering manual.  The manual is provided as a part of
0045  * a release.  For development sources please refer to the online
0046  * documentation at:
0047  *
0048  * https://docs.rtems.org
0049  */
0050 
0051 #ifdef HAVE_CONFIG_H
0052 #include "config.h"
0053 #endif
0054 
0055 #include <rtems.h>
0056 #include <string.h>
0057 #include <rtems/score/percpu.h>
0058 #include <rtems/score/threadimpl.h>
0059 
0060 #include "tx-support.h"
0061 #include "tx-thread-queue.h"
0062 
0063 #include <rtems/test.h>
0064 
0065 /**
0066  * @defgroup RtemsSemReqMrspObtain spec:/rtems/sem/req/mrsp-obtain
0067  *
0068  * @ingroup TestsuitesValidationSmpOnly0
0069  *
0070  * @{
0071  */
0072 
/* Pre-condition: what executes on the processor owned by the home scheduler
   (scheduler A) of the obtaining task. */
typedef enum {
  RtemsSemReqMrspObtain_Pre_Home_Idle,
  RtemsSemReqMrspObtain_Pre_Home_Task,
  RtemsSemReqMrspObtain_Pre_Home_TaskIdle,
  RtemsSemReqMrspObtain_Pre_Home_Second,
  RtemsSemReqMrspObtain_Pre_Home_SecondIdle,
  RtemsSemReqMrspObtain_Pre_Home_NA
} RtemsSemReqMrspObtain_Pre_Home;

/* Pre-condition: what executes on the processor owned by the helping
   scheduler (scheduler B) of the obtaining task. */
typedef enum {
  RtemsSemReqMrspObtain_Pre_Helping_Idle,
  RtemsSemReqMrspObtain_Pre_Helping_Task,
  RtemsSemReqMrspObtain_Pre_Helping_Helping,
  RtemsSemReqMrspObtain_Pre_Helping_HelpingIdle,
  RtemsSemReqMrspObtain_Pre_Helping_Third,
  RtemsSemReqMrspObtain_Pre_Helping_ThirdIdle,
  RtemsSemReqMrspObtain_Pre_Helping_NA
} RtemsSemReqMrspObtain_Pre_Helping;

/* Pre-condition: relation of the semaphore ceiling priority with respect to
   the home scheduler to the priorities already available to the task or to
   the second task. */
typedef enum {
  RtemsSemReqMrspObtain_Pre_PriorityHome_None,
  RtemsSemReqMrspObtain_Pre_PriorityHome_NewHigh,
  RtemsSemReqMrspObtain_Pre_PriorityHome_NewEqual,
  RtemsSemReqMrspObtain_Pre_PriorityHome_SecondHigh,
  RtemsSemReqMrspObtain_Pre_PriorityHome_SecondEqual,
  RtemsSemReqMrspObtain_Pre_PriorityHome_SecondLow,
  RtemsSemReqMrspObtain_Pre_PriorityHome_NA
} RtemsSemReqMrspObtain_Pre_PriorityHome;

/* Pre-condition: relation of the semaphore ceiling priority with respect to
   the helping scheduler to the priorities already available to the task or
   to the third task. */
typedef enum {
  RtemsSemReqMrspObtain_Pre_PriorityHelping_None,
  RtemsSemReqMrspObtain_Pre_PriorityHelping_Helping,
  RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdHigh,
  RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdEqual,
  RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdLow,
  RtemsSemReqMrspObtain_Pre_PriorityHelping_NA
} RtemsSemReqMrspObtain_Pre_PriorityHelping;

/* Post-condition: expected occupant of the home scheduler's processor after
   the semaphore was obtained. */
typedef enum {
  RtemsSemReqMrspObtain_Post_Home_Task,
  RtemsSemReqMrspObtain_Post_Home_TaskIdle,
  RtemsSemReqMrspObtain_Post_Home_Second,
  RtemsSemReqMrspObtain_Post_Home_SecondIdle,
  RtemsSemReqMrspObtain_Post_Home_NA
} RtemsSemReqMrspObtain_Post_Home;

/* Post-condition: expected occupant of the helping scheduler's processor
   after the semaphore was obtained. */
typedef enum {
  RtemsSemReqMrspObtain_Post_Helping_Idle,
  RtemsSemReqMrspObtain_Post_Helping_Task,
  RtemsSemReqMrspObtain_Post_Helping_TaskIdle,
  RtemsSemReqMrspObtain_Post_Helping_Helping,
  RtemsSemReqMrspObtain_Post_Helping_HelpingIdle,
  RtemsSemReqMrspObtain_Post_Helping_Third,
  RtemsSemReqMrspObtain_Post_Helping_ThirdIdle,
  RtemsSemReqMrspObtain_Post_Helping_NA
} RtemsSemReqMrspObtain_Post_Helping;

/*
 * One transition-map entry packed into 16 bits: a skip flag, one
 * not-applicable flag per pre-condition, and the two post-condition state
 * indices (3 bits each covers the 5 and 8 enumerators above).
 */
typedef struct {
  uint16_t Skip : 1;
  uint16_t Pre_Home_NA : 1;
  uint16_t Pre_Helping_NA : 1;
  uint16_t Pre_PriorityHome_NA : 1;
  uint16_t Pre_PriorityHelping_NA : 1;
  uint16_t Post_Home : 3;
  uint16_t Post_Helping : 3;
} RtemsSemReqMrspObtain_Entry;
0139 
0140 /**
0141  * @brief Test context for spec:/rtems/sem/req/mrsp-obtain test case.
0142  */
0143 typedef struct {
0144   /**
0145    * @brief This member contains the thread queue test context.
0146    */
0147   TQContext tq_ctx;
0148 
0149   /**
0150    * @brief This member contains the MrsP semaphore to obtain.
0151    */
0152   rtems_id sema_id;
0153 
0154   /**
0155    * @brief This member specifies the scheduler on which the task executes.
0156    */
0157   rtems_id task_scheduler;
0158 
0159   /**
0160    * @brief If this member is true, then the task shall already own a MrsP
0161    *   semaphore.
0162    */
0163   bool task_owns_mrsp_semaphore;
0164 
0165   /**
0166    * @brief If this member is true, then an idle task shall execute on
0167    *   scheduler A.
0168    */
0169   bool scheduler_a_idle;
0170 
0171   /**
0172    * @brief If this member is true, then an idle task shall execute on
0173    *   scheduler B.
0174    */
0175   bool scheduler_b_idle;
0176 
0177   /**
0178    * @brief If this member is true, then the second task shall be active.
0179    */
0180   bool second_active;
0181 
0182   /**
0183    * @brief This member specifies the priority of the second task.
0184    */
0185   rtems_task_priority second_priority;
0186 
0187   /**
0188    * @brief If this member is true, then the third task shall be active.
0189    */
0190   bool third_active;
0191 
0192   /**
0193    * @brief This member specifies the priority of the third task.
0194    */
0195   rtems_task_priority third_priority;
0196 
0197   /**
0198    * @brief If this member is true, then the helping task shall be active.
0199    */
0200   bool helping_active;
0201 
0202   /**
0203    * @brief This member specifies the priority of the MrsP semaphore with
0204    *   respect to scheduler A.
0205    */
0206   rtems_task_priority sema_priority_scheduler_a;
0207 
0208   /**
0209    * @brief This member specifies the priority of the MrsP semaphore with
0210    *   respect to scheduler B.
0211    */
0212   rtems_task_priority sema_priority_scheduler_b;
0213 
0214   struct {
0215     /**
0216      * @brief This member defines the pre-condition states for the next action.
0217      */
0218     size_t pcs[ 4 ];
0219 
0220     /**
0221      * @brief If this member is true, then the test action loop is executed.
0222      */
0223     bool in_action_loop;
0224 
0225     /**
0226      * @brief This member contains the next transition map index.
0227      */
0228     size_t index;
0229 
0230     /**
0231      * @brief This member contains the current transition map entry.
0232      */
0233     RtemsSemReqMrspObtain_Entry entry;
0234 
0235     /**
0236      * @brief If this member is true, then the current transition variant
0237      *   should be skipped.
0238      */
0239     bool skip;
0240   } Map;
0241 } RtemsSemReqMrspObtain_Context;
0242 
/* The single test context instance; zeroed in Setup before use. */
static RtemsSemReqMrspObtain_Context
  RtemsSemReqMrspObtain_Instance;
0245 
/*
 * Human-readable names for the pre-condition states, used by the test
 * framework to label transition variants.  Each array must stay parallel to
 * the corresponding Pre_* enum above, ending with "NA".
 */
static const char * const RtemsSemReqMrspObtain_PreDesc_Home[] = {
  "Idle",
  "Task",
  "TaskIdle",
  "Second",
  "SecondIdle",
  "NA"
};

static const char * const RtemsSemReqMrspObtain_PreDesc_Helping[] = {
  "Idle",
  "Task",
  "Helping",
  "HelpingIdle",
  "Third",
  "ThirdIdle",
  "NA"
};

static const char * const RtemsSemReqMrspObtain_PreDesc_PriorityHome[] = {
  "None",
  "NewHigh",
  "NewEqual",
  "SecondHigh",
  "SecondEqual",
  "SecondLow",
  "NA"
};

static const char * const RtemsSemReqMrspObtain_PreDesc_PriorityHelping[] = {
  "None",
  "Helping",
  "ThirdHigh",
  "ThirdEqual",
  "ThirdLow",
  "NA"
};

/* NULL-terminated list of the pre-condition description tables, in the same
   order as Map.pcs[]. */
static const char * const * const RtemsSemReqMrspObtain_PreDesc[] = {
  RtemsSemReqMrspObtain_PreDesc_Home,
  RtemsSemReqMrspObtain_PreDesc_Helping,
  RtemsSemReqMrspObtain_PreDesc_PriorityHome,
  RtemsSemReqMrspObtain_PreDesc_PriorityHelping,
  NULL
};
0291 
/* Map the test roles onto the generic thread queue blocker workers. */

/* Helping task of the obtaining task (runs on scheduler B). */
#define HELPING TQ_BLOCKER_A

/* Second task, competing on the home scheduler's processor. */
#define SECOND TQ_BLOCKER_B

/* Third task, competing on the helping scheduler's processor. */
#define THIRD TQ_BLOCKER_C

/* Holds TQ_MUTEX_A on scheduler B for the whole test (see Setup/Teardown). */
#define ASSISTANT TQ_BLOCKER_D

/* Busy-waiting worker used by MoveToScheduler() to displace the runner. */
#define MOVER TQ_BLOCKER_E

typedef RtemsSemReqMrspObtain_Context Context;
0303 
0304 static void SetSemaphorePriority(
0305   rtems_id            id,
0306   rtems_task_priority priority_a,
0307   rtems_task_priority priority_b
0308 )
0309 {
0310   rtems_status_code   sc;
0311   rtems_task_priority priority;
0312 
0313   sc = rtems_semaphore_set_priority(
0314     id,
0315     SCHEDULER_A_ID,
0316     priority_a,
0317     &priority
0318   );
0319   T_rsc_success( sc );
0320 
0321   sc = rtems_semaphore_set_priority(
0322     id,
0323     SCHEDULER_B_ID,
0324     priority_b,
0325     &priority
0326   );
0327   T_rsc_success( sc );
0328 }
0329 
0330 static void MoveToScheduler( Context *ctx, rtems_id scheduler_id )
0331 {
0332   rtems_id other_scheduler_id;
0333   uint32_t cpu;
0334 
0335   if ( scheduler_id == SCHEDULER_A_ID ) {
0336     other_scheduler_id =  SCHEDULER_B_ID;
0337     cpu = 0;
0338   } else {
0339     other_scheduler_id =  SCHEDULER_A_ID;
0340     cpu = 1;
0341   }
0342 
0343   TQSetScheduler( &ctx->tq_ctx, MOVER, other_scheduler_id, PRIO_VERY_HIGH );
0344   ctx->tq_ctx.busy_wait[ MOVER ] = true;
0345   TQSend( &ctx->tq_ctx, MOVER, TQ_EVENT_BUSY_WAIT );
0346   TQWaitForEventsReceived( &ctx->tq_ctx, MOVER );
0347   T_eq_u32( rtems_scheduler_get_processor(), cpu );
0348   ctx->tq_ctx.busy_wait[ MOVER ] = false;
0349   TQWaitForExecutionStop( &ctx->tq_ctx, MOVER );
0350 }
0351 
0352 static void RtemsSemReqMrspObtain_Pre_Home_Prepare(
0353   RtemsSemReqMrspObtain_Context *ctx,
0354   RtemsSemReqMrspObtain_Pre_Home state
0355 )
0356 {
0357   switch ( state ) {
0358     case RtemsSemReqMrspObtain_Pre_Home_Idle: {
0359       /*
0360        * While an idle task executes on the processor owned by the home
0361        * scheduler of the obtaining task.
0362        */
0363       ctx->scheduler_a_idle = true;
0364       break;
0365     }
0366 
0367     case RtemsSemReqMrspObtain_Pre_Home_Task: {
0368       /*
0369        * While the obtaining task executes on the processor owned by the home
0370        * scheduler of the obtaining task.
0371        */
0372       ctx->task_scheduler = SCHEDULER_A_ID;
0373       break;
0374     }
0375 
0376     case RtemsSemReqMrspObtain_Pre_Home_TaskIdle: {
0377       /*
0378        * While an idle task on behalf of the obtaining task executes on the
0379        * processor owned by the home scheduler of the obtaining task.
0380        */
0381       ctx->scheduler_a_idle = true;
0382       break;
0383     }
0384 
0385     case RtemsSemReqMrspObtain_Pre_Home_Second: {
0386       /*
0387        * While the second task executes on the processor owned by the home
0388        * scheduler of the obtaining task.
0389        */
0390       ctx->second_active = true;
0391       break;
0392     }
0393 
0394     case RtemsSemReqMrspObtain_Pre_Home_SecondIdle: {
0395       /*
0396        * While an idle task on behalf of the second task executes on the
0397        * processor owned by the home scheduler of the obtaining task.
0398        */
0399       ctx->second_active = true;
0400       ctx->scheduler_a_idle = true;
0401       break;
0402     }
0403 
0404     case RtemsSemReqMrspObtain_Pre_Home_NA:
0405       break;
0406   }
0407 }
0408 
0409 static void RtemsSemReqMrspObtain_Pre_Helping_Prepare(
0410   RtemsSemReqMrspObtain_Context    *ctx,
0411   RtemsSemReqMrspObtain_Pre_Helping state
0412 )
0413 {
0414   switch ( state ) {
0415     case RtemsSemReqMrspObtain_Pre_Helping_Idle: {
0416       /*
0417        * While an idle task executes on the processor owned by the helping
0418        * scheduler of the obtaining task.
0419        */
0420       ctx->scheduler_b_idle = true;
0421       break;
0422     }
0423 
0424     case RtemsSemReqMrspObtain_Pre_Helping_Task: {
0425       /*
0426        * While the obtaining task executes on the processor owned by the
0427        * helping scheduler of the obtaining task.
0428        */
0429       ctx->task_scheduler = SCHEDULER_B_ID;
0430       break;
0431     }
0432 
0433     case RtemsSemReqMrspObtain_Pre_Helping_Helping: {
0434       /*
0435        * While a helping task of the obtaining task executes on the processor
0436        * owned by the helping scheduler of the obtaining task.
0437        */
0438       ctx->task_owns_mrsp_semaphore = true;
0439       ctx->helping_active = true;
0440       break;
0441     }
0442 
0443     case RtemsSemReqMrspObtain_Pre_Helping_HelpingIdle: {
0444       /*
0445        * While an idle task on behalf of a helping task of the obtaining task
0446        * executes on the processor owned by the helping scheduler of the
0447        * obtaining task.
0448        */
0449       ctx->task_owns_mrsp_semaphore = true;
0450       ctx->helping_active = true;
0451       ctx->scheduler_b_idle = true;
0452       break;
0453     }
0454 
0455     case RtemsSemReqMrspObtain_Pre_Helping_Third: {
0456       /*
0457        * While the third task executes on the processor owned by the helping
0458        * scheduler of the obtaining task.
0459        */
0460       ctx->third_active = true;
0461       break;
0462     }
0463 
0464     case RtemsSemReqMrspObtain_Pre_Helping_ThirdIdle: {
0465       /*
0466        * While an idle task on behalf of the third task executes on the
0467        * processor owned by the helping scheduler of the obtaining task.
0468        */
0469       ctx->third_active = true;
0470       ctx->scheduler_b_idle = true;
0471       break;
0472     }
0473 
0474     case RtemsSemReqMrspObtain_Pre_Helping_NA:
0475       break;
0476   }
0477 }
0478 
0479 static void RtemsSemReqMrspObtain_Pre_PriorityHome_Prepare(
0480   RtemsSemReqMrspObtain_Context         *ctx,
0481   RtemsSemReqMrspObtain_Pre_PriorityHome state
0482 )
0483 {
0484   switch ( state ) {
0485     case RtemsSemReqMrspObtain_Pre_PriorityHome_None: {
0486       /*
0487        * While no ceiling priority with respect to the home scheduler of the
0488        * obtaining task is already available to the task.
0489        */
0490       ctx->second_priority = PRIO_HIGH;
0491       ctx->sema_priority_scheduler_a = PRIO_NORMAL;
0492       break;
0493     }
0494 
0495     case RtemsSemReqMrspObtain_Pre_PriorityHome_NewHigh: {
0496       /*
0497        * While the ceiling priority of the semaphore with respect to the home
0498        * scheduler of the obtaining task is higher than the ceiling priorities
0499        * already available to the task.
0500        */
0501       ctx->task_owns_mrsp_semaphore = true;
0502       ctx->sema_priority_scheduler_a = PRIO_HIGH;
0503       break;
0504     }
0505 
0506     case RtemsSemReqMrspObtain_Pre_PriorityHome_NewEqual: {
0507       /*
0508        * While the ceiling priority of the semaphore with respect to the home
0509        * scheduler of the obtaining task is equal to the ceiling priorities
0510        * already available to the task.
0511        */
0512       ctx->task_owns_mrsp_semaphore = true;
0513       ctx->sema_priority_scheduler_a = PRIO_NORMAL;
0514       break;
0515     }
0516 
0517     case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondHigh: {
0518       /*
0519        * While the ceiling priority of the semaphore with respect to the home
0520        * scheduler of the obtaining task is higher than the priority of the
0521        * second task.
0522        */
0523       ctx->second_priority = PRIO_HIGH;
0524       ctx->sema_priority_scheduler_a = PRIO_VERY_HIGH;
0525       break;
0526     }
0527 
0528     case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondEqual: {
0529       /*
0530        * While the ceiling priority of the semaphore with respect to the home
0531        * scheduler of the obtaining task is equal to the priority of the second
0532        * task.
0533        */
0534       ctx->second_priority = PRIO_HIGH;
0535       ctx->sema_priority_scheduler_a = PRIO_HIGH;
0536       break;
0537     }
0538 
0539     case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondLow: {
0540       /*
0541        * While the ceiling priority of the semaphore with respect to the home
0542        * scheduler of the obtaining task is lower than the priority of the
0543        * second task.
0544        */
0545       ctx->second_priority = PRIO_HIGH;
0546       ctx->sema_priority_scheduler_a = PRIO_NORMAL;
0547       break;
0548     }
0549 
0550     case RtemsSemReqMrspObtain_Pre_PriorityHome_NA:
0551       break;
0552   }
0553 }
0554 
0555 static void RtemsSemReqMrspObtain_Pre_PriorityHelping_Prepare(
0556   RtemsSemReqMrspObtain_Context            *ctx,
0557   RtemsSemReqMrspObtain_Pre_PriorityHelping state
0558 )
0559 {
0560   switch ( state ) {
0561     case RtemsSemReqMrspObtain_Pre_PriorityHelping_None: {
0562       /*
0563        * While no ceiling priority with respect to the helping scheduler of the
0564        * obtaining task is already available to the task.
0565        */
0566       ctx->sema_priority_scheduler_b = PRIO_NORMAL;
0567       break;
0568     }
0569 
0570     case RtemsSemReqMrspObtain_Pre_PriorityHelping_Helping: {
0571       /*
0572        * While ceiling priorities with respect to the helping scheduler of the
0573        * obtaining task are already available to the task.
0574        */
0575       ctx->helping_active = true;
0576       ctx->task_owns_mrsp_semaphore = true;
0577       ctx->sema_priority_scheduler_b = PRIO_NORMAL;
0578       break;
0579     }
0580 
0581     case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdHigh: {
0582       /*
0583        * While the ceiling priority of the semaphore with respect to the
0584        * helping scheduler of the obtaining task is higher than the priority of
0585        * the third task.
0586        */
0587       ctx->third_priority = PRIO_LOW;
0588       ctx->sema_priority_scheduler_b = PRIO_NORMAL;
0589       break;
0590     }
0591 
0592     case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdEqual: {
0593       /*
0594        * While the ceiling priority of the semaphore with respect to the
0595        * helping scheduler of the obtaining task is equal to the priority of
0596        * the third task.
0597        */
0598       ctx->third_priority = PRIO_NORMAL;
0599       ctx->sema_priority_scheduler_b = PRIO_NORMAL;
0600       break;
0601     }
0602 
0603     case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdLow: {
0604       /*
0605        * While the ceiling priority of the semaphore with respect to the
0606        * helping scheduler of the obtaining task is lower than the priority of
0607        * the third task.
0608        */
0609       ctx->third_priority = PRIO_HIGH;
0610       ctx->sema_priority_scheduler_b = PRIO_NORMAL;
0611       break;
0612     }
0613 
0614     case RtemsSemReqMrspObtain_Pre_PriorityHelping_NA:
0615       break;
0616   }
0617 }
0618 
/*
 * Checks which thread is the heir of processor 0 (home scheduler's
 * processor) and on which processor the runner currently executes, against
 * the expected Home post-condition state.
 */
static void RtemsSemReqMrspObtain_Post_Home_Check(
  RtemsSemReqMrspObtain_Context  *ctx,
  RtemsSemReqMrspObtain_Post_Home state
)
{
  const Per_CPU_Control *cpu;
  const Thread_Control  *scheduled;
  const Scheduler_Node  *scheduler_node;
  uint32_t               task_cpu_index;

  /* Processor 0 belongs to scheduler A, the home scheduler. */
  cpu = _Per_CPU_Get_by_index( 0 );
  scheduled = cpu->heir;
  task_cpu_index = rtems_scheduler_get_processor(); 

  switch ( state ) {
    case RtemsSemReqMrspObtain_Post_Home_Task: {
      /*
       * The obtaining task shall execute on the processor owned by the home
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_eq_ptr( scheduled, ctx->tq_ctx.runner_tcb );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Home_TaskIdle: {
      /*
       * An idle task on behalf of the obtaining task shall execute on the
       * processor owned by the home scheduler of the obtaining task.
       */
      /* The runner itself executes on processor 1 while the idle stands in
         for it on processor 0. */
      T_eq_u32( task_cpu_index, 1 );
      T_true( scheduled->is_idle );
      /* Scheduler node index 0 is the runner's node for scheduler A. */
      scheduler_node = _Thread_Scheduler_get_node_by_index(
        ctx->tq_ctx.runner_tcb,
        0
      );
      T_eq_ptr( scheduler_node->user, scheduled );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Home_Second: {
      /*
       * The second task shall execute on the processor owned by the home
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 1 );
      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ SECOND ] );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Home_SecondIdle: {
      /*
       * An idle task on behalf of the second task shall execute on the
       * processor owned by the home scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 1 );
      T_true( scheduled->is_idle );
      scheduler_node = _Thread_Scheduler_get_node_by_index(
        ctx->tq_ctx.worker_tcb[ SECOND ],
        0
      );
      T_eq_ptr( scheduler_node->user, scheduled );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Home_NA:
      break;
  }
}
0688 
/*
 * Checks which thread is the heir of processor 1 (helping scheduler's
 * processor) and on which processor the runner currently executes, against
 * the expected Helping post-condition state.
 */
static void RtemsSemReqMrspObtain_Post_Helping_Check(
  RtemsSemReqMrspObtain_Context     *ctx,
  RtemsSemReqMrspObtain_Post_Helping state
)
{
  const Per_CPU_Control *cpu;
  const Thread_Control  *scheduled;
  const Scheduler_Node  *scheduler_node;
  uint32_t               task_cpu_index;

  /* Processor 1 belongs to scheduler B, the helping scheduler. */
  cpu = _Per_CPU_Get_by_index( 1 );
  scheduled = cpu->heir;
  task_cpu_index = rtems_scheduler_get_processor(); 

  switch ( state ) {
    case RtemsSemReqMrspObtain_Post_Helping_Idle: {
      /*
       * An idle task shall execute on the processor owned by the helping
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_true( scheduled->is_idle );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_Task: {
      /*
       * The obtaining task shall execute on the processor owned by the helping
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 1 );
      T_eq_ptr( scheduled, ctx->tq_ctx.runner_tcb );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_TaskIdle: {
      /*
       * An idle task on behalf of the obtaining task shall execute on the
       * processor owned by the helping scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_true( scheduled->is_idle );
      /* Scheduler node index 1 is the runner's node for scheduler B. */
      scheduler_node = _Thread_Scheduler_get_node_by_index(
        ctx->tq_ctx.runner_tcb,
        1
      );
      /* NOTE(review): unlike the other idle cases, this compares the node
         user against the runner TCB rather than the scheduled idle thread —
         confirm whether this asymmetry is intentional. */
      T_eq_ptr( scheduler_node->user, ctx->tq_ctx.runner_tcb );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_Helping: {
      /*
       * The helping task shall execute on the processor owned by the helping
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ HELPING ] );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_HelpingIdle: {
      /*
       * An idle task on behalf of the helping task shall execute on the
       * processor owned by the helping scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_true( scheduled->is_idle );
      scheduler_node = _Thread_Scheduler_get_node_by_index(
        ctx->tq_ctx.worker_tcb[ HELPING ],
        1
      );
      T_eq_ptr( scheduler_node->user, scheduled );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_Third: {
      /*
       * The third task shall execute on the processor owned by the helping
       * scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ THIRD ] );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_ThirdIdle: {
      /*
       * An idle task on behalf of the third task shall execute on the
       * processor owned by the helping scheduler of the obtaining task.
       */
      T_eq_u32( task_cpu_index, 0 );
      /* NOTE(review): no T_true( scheduled->is_idle ) here, unlike the
         other *Idle cases — confirm whether the check was omitted on
         purpose. */
      scheduler_node = _Thread_Scheduler_get_node_by_index(
        ctx->tq_ctx.worker_tcb[ THIRD ],
        1
      );
      T_eq_ptr( scheduler_node->user, scheduled );
      break;
    }

    case RtemsSemReqMrspObtain_Post_Helping_NA:
      break;
  }
}
0792 
/*
 * One-time test fixture setup: creates the MrsP semaphore under test,
 * initializes the thread queue context, replaces mutexes B and C with MrsP
 * variants, assigns workers to schedulers, and parks the ASSISTANT worker
 * on scheduler B holding mutex A.  The statement order matters; resources
 * created here are released in RtemsSemReqMrspObtain_Teardown().
 */
static void RtemsSemReqMrspObtain_Setup( RtemsSemReqMrspObtain_Context *ctx )
{
  rtems_status_code sc;
  rtems_id          mutex_b;
  rtems_id          mutex_c;

  memset( ctx, 0, sizeof( *ctx ) );

  /* The MrsP semaphore which the test action obtains. */
  sc = rtems_semaphore_create(
    rtems_build_name( 'S', 'E', 'M', 'A' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
    PRIO_NORMAL,
    &ctx->sema_id
  );
  T_rsc_success( sc );

  ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
  ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
  ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
  ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
  ctx->tq_ctx.surrender = TQSurrenderClassicSem;
  ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
  ctx->tq_ctx.convert_status = TQConvertStatusClassic;
  TQInitialize( &ctx->tq_ctx );

  /* Replace the default mutexes B and C with MrsP semaphores, which the
     helping scenarios require. */
  DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] );
  DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] );

  mutex_b = 0;
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'T', 'X', 'B' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
    PRIO_NORMAL,
    &mutex_b
  );
  T_rsc_success( sc );

  mutex_c = 0;
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'T', 'X', 'C' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
    PRIO_NORMAL,
    &mutex_c
  );
  T_rsc_success( sc );

  ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] = mutex_b;
  ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] = mutex_c;

  TQSetScheduler( &ctx->tq_ctx, HELPING, SCHEDULER_B_ID, PRIO_VERY_LOW );
  TQSetScheduler( &ctx->tq_ctx, THIRD, SCHEDULER_B_ID, PRIO_NORMAL );

  /* The runner holds mutex A; the ASSISTANT blocks on it from scheduler B
     for the whole test (released in the teardown). */
  TQMutexObtain( &ctx->tq_ctx, TQ_MUTEX_A );
  TQSetScheduler( &ctx->tq_ctx, ASSISTANT, SCHEDULER_B_ID, PRIO_VERY_LOW );
  TQSendAndWaitForExecutionStop(
    &ctx->tq_ctx,
    ASSISTANT,
    TQ_EVENT_MUTEX_A_OBTAIN
  );

  SetSemaphorePriority(
    ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ],
    PRIO_NORMAL,
    PRIO_VERY_LOW
  );
}
0865 
0866 static void RtemsSemReqMrspObtain_Setup_Wrap( void *arg )
0867 {
0868   RtemsSemReqMrspObtain_Context *ctx;
0869 
0870   ctx = arg;
0871   ctx->Map.in_action_loop = false;
0872   RtemsSemReqMrspObtain_Setup( ctx );
0873 }
0874 
/*
 * Fixture teardown: releases mutex A so the ASSISTANT can obtain and
 * release it, then destroys the thread queue context and the MrsP semaphore
 * created in the setup.
 */
static void RtemsSemReqMrspObtain_Teardown(
  RtemsSemReqMrspObtain_Context *ctx
)
{
  TQMutexRelease( &ctx->tq_ctx, TQ_MUTEX_A );
  TQSendAndWaitForExecutionStop(
    &ctx->tq_ctx,
    ASSISTANT,
    TQ_EVENT_MUTEX_A_RELEASE
  );
  TQDestroy( &ctx->tq_ctx );
  DeleteMutex( ctx->sema_id );
}
0888 
0889 static void RtemsSemReqMrspObtain_Teardown_Wrap( void *arg )
0890 {
0891   RtemsSemReqMrspObtain_Context *ctx;
0892 
0893   ctx = arg;
0894   ctx->Map.in_action_loop = false;
0895   RtemsSemReqMrspObtain_Teardown( ctx );
0896 }
0897 
0898 static void RtemsSemReqMrspObtain_Prepare( RtemsSemReqMrspObtain_Context *ctx )
0899 {
0900   ctx->task_scheduler = INVALID_ID;
0901   ctx->task_owns_mrsp_semaphore = false;
0902   ctx->scheduler_a_idle = false;
0903   ctx->scheduler_b_idle = false;
0904   ctx->helping_active = false;
0905   ctx->second_active = false;
0906   ctx->third_active = false;
0907 }
0908 
/*
 * Builds the scenario selected by the pre-condition flags and then performs
 * the action: obtaining the MrsP semaphore.  The ordering of the steps is
 * significant — the helping task must block on mutex B before the runner
 * may migrate, and the second/third tasks must be running (or represented
 * by an idle task) before the semaphore is obtained.
 */
static void RtemsSemReqMrspObtain_Action( RtemsSemReqMrspObtain_Context *ctx )
{
  /* Give the runner an already-owned MrsP semaphore where required. */
  if ( ctx->task_owns_mrsp_semaphore ) {
    TQMutexObtain( &ctx->tq_ctx, TQ_MUTEX_B );
  }

  if ( ctx->helping_active ) {
    /* A helping task only makes sense if the runner owns mutex B. */
    T_true( ctx->task_owns_mrsp_semaphore );

    TQSendAndWaitForIntendToBlock(
      &ctx->tq_ctx,
      HELPING,
      TQ_EVENT_MUTEX_B_OBTAIN
    );

    /* Suspending the blocked helper leaves an idle task executing on its
       behalf on scheduler B. */
    if ( ctx->scheduler_b_idle ) {
      SuspendTask( ctx->tq_ctx.worker_id[ HELPING ] );
    }
  }

  /* Vacate the home processor when the scenario needs an idle or the
     second task there. */
  if ( ctx->scheduler_a_idle || ctx->second_active ) {
    MoveToScheduler( ctx, SCHEDULER_B_ID );
  }

  if ( ctx->second_active ) {
    /* The second and third tasks are mutually exclusive scenarios. */
    T_false( ctx->third_active );

    TQSetPriority( &ctx->tq_ctx, SECOND, ctx->second_priority );

    if ( ctx->scheduler_a_idle ) {
      /* Block the second task on MrsP mutex C so an idle executes on its
         behalf. */
      SetSemaphorePriority(
        ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ],
        ctx->second_priority,
        ctx->second_priority
      );
      TQSendAndWaitForExecutionStop(
        &ctx->tq_ctx,
        SECOND,
        TQ_EVENT_MUTEX_C_OBTAIN
      );
    } else {
      /* Keep the second task busy-waiting on the home processor. */
      ctx->tq_ctx.busy_wait[ SECOND ] = true;
      TQSend( &ctx->tq_ctx, SECOND, TQ_EVENT_BUSY_WAIT );
      TQWaitForEventsReceived( &ctx->tq_ctx, SECOND );
    }
  }

  if ( ctx->third_active ) {
    T_false( ctx->second_active );

    TQSetPriority( &ctx->tq_ctx, THIRD, ctx->third_priority );

    if ( ctx->scheduler_b_idle ) {
      /* Block the third task on MrsP mutex C so an idle executes on its
         behalf. */
      SetSemaphorePriority(
        ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ],
        ctx->third_priority,
        ctx->third_priority
      );
      TQSendAndWaitForExecutionStop(
        &ctx->tq_ctx,
        THIRD,
        TQ_EVENT_MUTEX_C_OBTAIN
      );
    } else {
      /* Keep the third task busy-waiting on the helping processor. */
      ctx->tq_ctx.busy_wait[ THIRD ] = true;
      TQSend( &ctx->tq_ctx, THIRD, TQ_EVENT_BUSY_WAIT );
      TQWaitForEventsReceived( &ctx->tq_ctx, THIRD );
    }
  }

  /* The action: obtain the MrsP semaphore with the configured ceilings.
     The post-condition checks run afterwards; Cleanup() releases it. */
  SetSemaphorePriority(
    ctx->sema_id,
    ctx->sema_priority_scheduler_a,
    ctx->sema_priority_scheduler_b
  );
  ObtainMutex( ctx->sema_id );
}
0986 
static void RtemsSemReqMrspObtain_Cleanup( RtemsSemReqMrspObtain_Context *ctx )
{
  /*
   * Undo the scenario built by the action in reverse dependency order:
   * release the semaphore under test, release the auxiliary mutex, unblock
   * the second/third/helping workers, and return the runner to scheduler A.
   */
  ReleaseMutex( ctx->sema_id );

  if ( ctx->task_owns_mrsp_semaphore ) {
    TQMutexRelease( &ctx->tq_ctx, TQ_MUTEX_B );
  }

  if ( ctx->second_active ) {
    /* Get out of the second worker's way before waking it up. */
    MoveToScheduler( ctx, SCHEDULER_B_ID );

    if ( ctx->scheduler_a_idle ) {
      /* The worker was blocked on TQ_MUTEX_C; let it release the mutex. */
      TQSendAndWaitForExecutionStop(
        &ctx->tq_ctx,
        SECOND,
        TQ_EVENT_MUTEX_C_RELEASE
      );
    } else {
      /* The worker was busy waiting; stop the busy wait. */
      ctx->tq_ctx.busy_wait[ SECOND ] = false;
      TQWaitForExecutionStop( &ctx->tq_ctx, SECOND );
    }
  }

  if ( ctx->third_active ) {
    /* Get out of the third worker's way before waking it up. */
    MoveToScheduler( ctx, SCHEDULER_A_ID );

    if ( ctx->scheduler_b_idle ) {
      /* The worker was blocked on TQ_MUTEX_C; let it release the mutex. */
      TQSendAndWaitForExecutionStop(
        &ctx->tq_ctx,
        THIRD,
        TQ_EVENT_MUTEX_C_RELEASE
      );
    } else {
      /* The worker was busy waiting; stop the busy wait. */
      ctx->tq_ctx.busy_wait[ THIRD ] = false;
      TQWaitForExecutionStop( &ctx->tq_ctx, THIRD );
    }
  }

  if ( ctx->helping_active ) {
    MoveToScheduler( ctx, SCHEDULER_A_ID );

    /* Resume the helping worker which was suspended by the action. */
    if ( ctx->scheduler_b_idle ) {
      ResumeTask( ctx->tq_ctx.worker_id[ HELPING ] );
    }

    /* Let the helping worker obtain and release TQ_MUTEX_B and settle. */
    TQSendAndWaitForExecutionStop(
      &ctx->tq_ctx,
      HELPING,
      TQ_EVENT_MUTEX_B_RELEASE
    );
  }

  /* Ensure each test variant starts with the runner on scheduler A. */
  MoveToScheduler( ctx, SCHEDULER_A_ID );
}
1041 
/*
 * Generated transition table: each entry holds a skip flag, four
 * pre-condition adjustment fields, and the expected post-condition states
 * (Post_Home, Post_Helping) checked after the action.  Entries with the
 * leading 1 are skipped variants (both post-conditions are N/A).  Do not
 * edit by hand; this table is produced by the RTEMS specification tooling.
 */
static const RtemsSemReqMrspObtain_Entry
RtemsSemReqMrspObtain_Entries[] = {
  { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
    RtemsSemReqMrspObtain_Post_Helping_NA },
  { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
    RtemsSemReqMrspObtain_Post_Helping_NA },
  { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
    RtemsSemReqMrspObtain_Post_Helping_NA },
  { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
    RtemsSemReqMrspObtain_Post_Helping_NA },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
    RtemsSemReqMrspObtain_Post_Helping_Third },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
    RtemsSemReqMrspObtain_Post_Helping_ThirdIdle },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_TaskIdle,
    RtemsSemReqMrspObtain_Post_Helping_Task },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Second,
    RtemsSemReqMrspObtain_Post_Helping_Task },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_SecondIdle,
    RtemsSemReqMrspObtain_Post_Helping_Task },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
    RtemsSemReqMrspObtain_Post_Helping_Helping },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
    RtemsSemReqMrspObtain_Post_Helping_HelpingIdle },
  { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
    RtemsSemReqMrspObtain_Post_Helping_Idle }
};
1069 
/*
 * Generated mapping from the linearized pre-condition state index (the order
 * in which the nested loops of the test case enumerate Home x Helping x
 * PriorityHome x PriorityHelping) to an index into
 * RtemsSemReqMrspObtain_Entries[].  Consumed sequentially by PopEntry();
 * do not edit by hand.
 */
static const uint8_t
RtemsSemReqMrspObtain_Map[] = {
  2, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 1, 3, 0, 1, 1, 1, 3,
  0, 1, 1, 1, 6, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 1, 1, 1, 3, 3,
  1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 1,
  1, 1, 3, 3, 1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, 2, 2, 2, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11,
  0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 1, 3, 0, 1, 1, 1, 3, 0,
  1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 3, 3, 1, 1, 1, 3, 3, 1,
  1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 9, 1, 1, 1, 0, 9, 1, 1, 1, 3, 3, 1, 1,
  1, 3, 3, 1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 10, 1, 1, 1, 0, 10, 1, 1,
  1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 4, 0, 4, 4, 4, 0, 4, 4, 4, 4,
  0, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 0, 5, 5, 5, 0,
  5, 5, 5, 5, 0, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1,
  1, 1, 0, 0, 0, 0, 0, 0, 6, 1, 1, 1, 0, 6, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1,
  1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 0, 3, 1, 1, 1,
  0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 0,
  3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 2,
  2, 2, 2, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 2, 2,
  2, 2, 0, 2, 2, 2, 2, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 2, 0, 1, 1,
  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1,
  7, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 1, 1, 1, 7, 7, 1, 1, 1, 7,
  7, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2,
  1, 1, 1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1,
  1, 1, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 8, 0,
  1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 1, 1, 1, 8, 8, 1, 1, 1, 8, 8, 1,
  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 1, 1,
  1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1,
  0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
1108 
1109 static size_t RtemsSemReqMrspObtain_Scope( void *arg, char *buf, size_t n )
1110 {
1111   RtemsSemReqMrspObtain_Context *ctx;
1112 
1113   ctx = arg;
1114 
1115   if ( ctx->Map.in_action_loop ) {
1116     return T_get_scope( RtemsSemReqMrspObtain_PreDesc, buf, n, ctx->Map.pcs );
1117   }
1118 
1119   return 0;
1120 }
1121 
/*
 * Test fixture: wires the generated setup/teardown wrappers and the scope
 * hook to the shared context instance.  No stop handler is needed.
 */
static T_fixture RtemsSemReqMrspObtain_Fixture = {
  .setup = RtemsSemReqMrspObtain_Setup_Wrap,
  .stop = NULL,
  .teardown = RtemsSemReqMrspObtain_Teardown_Wrap,
  .scope = RtemsSemReqMrspObtain_Scope,
  .initial_context = &RtemsSemReqMrspObtain_Instance
};
1129 
1130 static inline RtemsSemReqMrspObtain_Entry RtemsSemReqMrspObtain_PopEntry(
1131   RtemsSemReqMrspObtain_Context *ctx
1132 )
1133 {
1134   size_t index;
1135 
1136   index = ctx->Map.index;
1137   ctx->Map.index = index + 1;
1138   return RtemsSemReqMrspObtain_Entries[
1139     RtemsSemReqMrspObtain_Map[ index ]
1140   ];
1141 }
1142 
/*
 * Execute one test variant: apply the four pre-condition preparations
 * selected by ctx->Map.pcs, run the action, then verify both
 * post-conditions against the expected states from the current map entry.
 * The call order is fixed by the specification tooling.
 */
static void RtemsSemReqMrspObtain_TestVariant(
  RtemsSemReqMrspObtain_Context *ctx
)
{
  RtemsSemReqMrspObtain_Pre_Home_Prepare( ctx, ctx->Map.pcs[ 0 ] );
  RtemsSemReqMrspObtain_Pre_Helping_Prepare( ctx, ctx->Map.pcs[ 1 ] );
  RtemsSemReqMrspObtain_Pre_PriorityHome_Prepare( ctx, ctx->Map.pcs[ 2 ] );
  RtemsSemReqMrspObtain_Pre_PriorityHelping_Prepare( ctx, ctx->Map.pcs[ 3 ] );
  RtemsSemReqMrspObtain_Action( ctx );
  RtemsSemReqMrspObtain_Post_Home_Check( ctx, ctx->Map.entry.Post_Home );
  RtemsSemReqMrspObtain_Post_Helping_Check( ctx, ctx->Map.entry.Post_Helping );
}
1155 
1156 /**
1157  * @fn void T_case_body_RtemsSemReqMrspObtain( void )
1158  */
1159 T_TEST_CASE_FIXTURE( RtemsSemReqMrspObtain, &RtemsSemReqMrspObtain_Fixture )
1160 {
1161   RtemsSemReqMrspObtain_Context *ctx;
1162 
1163   ctx = T_fixture_context();
1164   ctx->Map.in_action_loop = true;
1165   ctx->Map.index = 0;
1166 
1167   for (
1168     ctx->Map.pcs[ 0 ] = RtemsSemReqMrspObtain_Pre_Home_Idle;
1169     ctx->Map.pcs[ 0 ] < RtemsSemReqMrspObtain_Pre_Home_NA;
1170     ++ctx->Map.pcs[ 0 ]
1171   ) {
1172     for (
1173       ctx->Map.pcs[ 1 ] = RtemsSemReqMrspObtain_Pre_Helping_Idle;
1174       ctx->Map.pcs[ 1 ] < RtemsSemReqMrspObtain_Pre_Helping_NA;
1175       ++ctx->Map.pcs[ 1 ]
1176     ) {
1177       for (
1178         ctx->Map.pcs[ 2 ] = RtemsSemReqMrspObtain_Pre_PriorityHome_None;
1179         ctx->Map.pcs[ 2 ] < RtemsSemReqMrspObtain_Pre_PriorityHome_NA;
1180         ++ctx->Map.pcs[ 2 ]
1181       ) {
1182         for (
1183           ctx->Map.pcs[ 3 ] = RtemsSemReqMrspObtain_Pre_PriorityHelping_None;
1184           ctx->Map.pcs[ 3 ] < RtemsSemReqMrspObtain_Pre_PriorityHelping_NA;
1185           ++ctx->Map.pcs[ 3 ]
1186         ) {
1187           ctx->Map.entry = RtemsSemReqMrspObtain_PopEntry( ctx );
1188 
1189           if ( ctx->Map.entry.Skip ) {
1190             continue;
1191           }
1192 
1193           RtemsSemReqMrspObtain_Prepare( ctx );
1194           RtemsSemReqMrspObtain_TestVariant( ctx );
1195           RtemsSemReqMrspObtain_Cleanup( ctx );
1196         }
1197       }
1198     }
1199   }
1200 }
1201 
1202 /** @} */