Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:53

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup ScoreTqReqSurrender
0007  */
0008 
0009 /*
0010  * Copyright (C) 2021 embedded brains GmbH & Co. KG
0011  *
0012  * Redistribution and use in source and binary forms, with or without
0013  * modification, are permitted provided that the following conditions
0014  * are met:
0015  * 1. Redistributions of source code must retain the above copyright
0016  *    notice, this list of conditions and the following disclaimer.
0017  * 2. Redistributions in binary form must reproduce the above copyright
0018  *    notice, this list of conditions and the following disclaimer in the
0019  *    documentation and/or other materials provided with the distribution.
0020  *
0021  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0022  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0023  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0024  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0025  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0026  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0027  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0028  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0029  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0030  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0031  * POSSIBILITY OF SUCH DAMAGE.
0032  */
0033 
0034 /*
0035  * This file is part of the RTEMS quality process and was automatically
0036  * generated.  If you find something that needs to be fixed or
0037  * worded better please post a report or patch to an RTEMS mailing list
0038  * or raise a bug report:
0039  *
0040  * https://www.rtems.org/bugs.html
0041  *
0042  * For information on updating and regenerating please refer to the How-To
0043  * section in the Software Requirements Engineering chapter of the
0044  * RTEMS Software Engineering manual.  The manual is provided as a part of
0045  * a release.  For development sources please refer to the online
0046  * documentation at:
0047  *
0048  * https://docs.rtems.org
0049  */
0050 
0051 #ifdef HAVE_CONFIG_H
0052 #include "config.h"
0053 #endif
0054 
0055 #include <rtems/score/smpbarrier.h>
0056 #include <rtems/score/threadimpl.h>
0057 
0058 #include "tr-tq-surrender.h"
0059 #include "tx-support.h"
0060 
0061 #include <rtems/test.h>
0062 
0063 /**
0064  * @defgroup ScoreTqReqSurrender spec:/score/tq/req/surrender
0065  *
0066  * @ingroup TestsuitesValidationNoClock0
0067  *
0068  * @{
0069  */
0070 
/*
 * One transition map entry.  Skip discards the variant entirely (checked in
 * ScoreTqReqSurrender_Run()), the Pre_*_NA bits mark pre-conditions that are
 * not applicable for the entry, and the Post_* members hold the expected
 * post-condition states checked after the action.
 */
typedef struct {
  uint8_t Skip : 1;
  uint8_t Pre_HasOwner_NA : 1;
  uint8_t Pre_Discipline_NA : 1;
  uint8_t Pre_WaitState_NA : 1;
  uint8_t Post_Dequeue : 2;
  uint8_t Post_Unblock : 2;
} ScoreTqReqSurrender_Entry;
0079 
/**
 * @brief Test context for spec:/score/tq/req/surrender test case.
 */
typedef struct {
  /**
   * @brief This member contains the call within ISR request.
   */
  CallWithinISRRequest request;

  /**
   * @brief This member contains the barrier to synchronize the runner and the
   *   worker.
   */
  SMP_barrier_Control barrier;

  /**
   * @brief If this member is true, then the dequeued thread shall be in the
   *   intend to block wait state.
   */
  bool intend_to_block;

  /**
   * @brief This member contains the expected counter of worker B.
   *
   * A value of zero means that worker B did not participate in the test
   * variant.
   */
  uint32_t expected_blocker_b_counter;

  /**
   * @brief This member contains a copy of the corresponding
   *   ScoreTqReqSurrender_Run() parameter.
   */
  TQContext *tq_ctx;

  struct {
    /**
     * @brief This member defines the pre-condition states for the next action.
     */
    size_t pcs[ 3 ];

    /**
     * @brief If this member is true, then the test action loop is executed.
     */
    bool in_action_loop;

    /**
     * @brief This member contains the next transition map index.
     */
    size_t index;

    /**
     * @brief This member contains the current transition map entry.
     */
    ScoreTqReqSurrender_Entry entry;

    /**
     * @brief If this member is true, then the current transition variant
     *   should be skipped.
     */
    bool skip;
  } Map;
} ScoreTqReqSurrender_Context;
0140 
/* Singleton context instance used by ScoreTqReqSurrender_Run() */
static ScoreTqReqSurrender_Context
  ScoreTqReqSurrender_Instance;

/* Human readable state names for the HasOwner pre-condition */
static const char * const ScoreTqReqSurrender_PreDesc_HasOwner[] = {
  "Yes",
  "No",
  "NA"
};

/* Human readable state names for the Discipline pre-condition */
static const char * const ScoreTqReqSurrender_PreDesc_Discipline[] = {
  "FIFO",
  "Priority",
  "NA"
};

/* Human readable state names for the WaitState pre-condition */
static const char * const ScoreTqReqSurrender_PreDesc_WaitState[] = {
  "Blocked",
  "IntendToBlock",
  "NA"
};

/* NULL-terminated list of pre-condition descriptors, indexed like Map.pcs */
static const char * const * const ScoreTqReqSurrender_PreDesc[] = {
  ScoreTqReqSurrender_PreDesc_HasOwner,
  ScoreTqReqSurrender_PreDesc_Discipline,
  ScoreTqReqSurrender_PreDesc_WaitState,
  NULL
};

/* Shorthand for the test context type used by the local helpers */
typedef ScoreTqReqSurrender_Context Context;
0170 
/*
 * Return the thread of the next unblock event in the recorded scheduler log
 * (via TQGetNextUnblock()); *index is advanced by the callee.
 */
static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
{
  return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
}
0175 
/* Return the thread control block of the specified worker. */
static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
{
  return ctx->tq_ctx->worker_tcb[ worker ];
}
0180 
0181 static void Surrender( void *arg )
0182 {
0183   Context       *ctx;
0184   Status_Control status;
0185 
0186   ctx = arg;
0187   TQSchedulerRecordStart( ctx->tq_ctx );
0188 
0189   status = TQSurrender( ctx->tq_ctx );
0190   T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
0191 
0192   TQSchedulerRecordStop( ctx->tq_ctx );
0193 }
0194 
#if defined(RTEMS_SMP)
/*
 * Wait twice on the two-party context barrier (B0 and B1).  This pairs with
 * the barrier waits in ScoreTqReqSurrender_Action() to delay the blocking
 * thread while the runner performs the surrender on another processor.
 */
static void Delay( void *arg )
{
  Context *ctx = arg;
  SMP_barrier_State barrier_state;

  _SMP_barrier_State_initialize( &barrier_state );

  /* B0 */
  _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );

  /* B1 */
  _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
}
#endif
0211 
0212 static void SchedulerBlock(
0213   void                    *arg,
0214   const T_scheduler_event *event,
0215   T_scheduler_when         when
0216 )
0217 {
0218   Context *ctx;
0219 
0220   ctx = arg;
0221 
0222   if (
0223     when == T_SCHEDULER_BEFORE &&
0224     event->operation == T_SCHEDULER_BLOCK
0225   ) {
0226     T_scheduler_set_event_handler( NULL, NULL );
0227 #if defined(RTEMS_SMP)
0228     ctx->request.handler = Delay;
0229 #else
0230     ctx->request.handler = Surrender;
0231 #endif
0232     CallWithinISRSubmit( &ctx->request );
0233   }
0234 }
0235 
0236 static void ScoreTqReqSurrender_Pre_HasOwner_Prepare(
0237   ScoreTqReqSurrender_Context     *ctx,
0238   ScoreTqReqSurrender_Pre_HasOwner state
0239 )
0240 {
0241   switch ( state ) {
0242     case ScoreTqReqSurrender_Pre_HasOwner_Yes: {
0243       /*
0244        * Where the thread queue has a previous owner thread.
0245        */
0246       if ( ctx->tq_ctx->get_owner == NULL ) {
0247         ctx->Map.skip = true;
0248       }
0249       break;
0250     }
0251 
0252     case ScoreTqReqSurrender_Pre_HasOwner_No: {
0253       /*
0254        * Where the thread queue has no owner threads.
0255        */
0256       if ( ctx->tq_ctx->get_owner != NULL ) {
0257         ctx->Map.skip = true;
0258       }
0259       break;
0260     }
0261 
0262     case ScoreTqReqSurrender_Pre_HasOwner_NA:
0263       break;
0264   }
0265 }
0266 
0267 static void ScoreTqReqSurrender_Pre_Discipline_Prepare(
0268   ScoreTqReqSurrender_Context       *ctx,
0269   ScoreTqReqSurrender_Pre_Discipline state
0270 )
0271 {
0272   switch ( state ) {
0273     case ScoreTqReqSurrender_Pre_Discipline_FIFO: {
0274       /*
0275        * Where the thread queue uses the FIFO discipline.
0276        */
0277       if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
0278         ctx->Map.skip = true;
0279       }
0280       break;
0281     }
0282 
0283     case ScoreTqReqSurrender_Pre_Discipline_Priority: {
0284       /*
0285        * Where the thread queue uses the priority discipline.
0286        */
0287       if ( ctx->tq_ctx->discipline != TQ_PRIORITY ) {
0288         ctx->Map.skip = true;
0289       }
0290       break;
0291     }
0292 
0293     case ScoreTqReqSurrender_Pre_Discipline_NA:
0294       break;
0295   }
0296 }
0297 
0298 static void ScoreTqReqSurrender_Pre_WaitState_Prepare(
0299   ScoreTqReqSurrender_Context      *ctx,
0300   ScoreTqReqSurrender_Pre_WaitState state
0301 )
0302 {
0303   switch ( state ) {
0304     case ScoreTqReqSurrender_Pre_WaitState_Blocked: {
0305       /*
0306        * While the dequeued thread is in the blocked wait state.
0307        */
0308       ctx->intend_to_block = false;
0309       break;
0310     }
0311 
0312     case ScoreTqReqSurrender_Pre_WaitState_IntendToBlock: {
0313       /*
0314        * While the dequeued thread is in the intend to block wait state.
0315        */
0316       ctx->intend_to_block = true;
0317       break;
0318     }
0319 
0320     case ScoreTqReqSurrender_Pre_WaitState_NA:
0321       break;
0322   }
0323 }
0324 
0325 static void ScoreTqReqSurrender_Post_Dequeue_Check(
0326   ScoreTqReqSurrender_Context     *ctx,
0327   ScoreTqReqSurrender_Post_Dequeue state
0328 )
0329 {
0330   switch ( state ) {
0331     case ScoreTqReqSurrender_Post_Dequeue_FIFO: {
0332       /*
0333        * The first thread in FIFO order shall be dequeued from the thread
0334        * queue.
0335        */
0336       T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
0337       T_eq_u32(
0338         TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ),
0339         ctx->expected_blocker_b_counter
0340       );
0341       break;
0342     }
0343 
0344     case ScoreTqReqSurrender_Post_Dequeue_Priority: {
0345       /*
0346        * The first thread in priority order shall be dequeued from the thread
0347        * queue.
0348        */
0349       T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
0350       T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ), 2 );
0351       break;
0352     }
0353 
0354     case ScoreTqReqSurrender_Post_Dequeue_NA:
0355       break;
0356   }
0357 }
0358 
0359 static void ScoreTqReqSurrender_Post_Unblock_Check(
0360   ScoreTqReqSurrender_Context     *ctx,
0361   ScoreTqReqSurrender_Post_Unblock state
0362 )
0363 {
0364   size_t i;
0365 
0366   i = 0;
0367 
0368   switch ( state ) {
0369     case ScoreTqReqSurrender_Post_Unblock_Yes: {
0370       /*
0371        * The dequeued thread shall be unblocked by surrender operation.
0372        */
0373       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
0374       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
0375       break;
0376     }
0377 
0378     case ScoreTqReqSurrender_Post_Unblock_No: {
0379       /*
0380        * The dequeued thread shall not be unblocked by surrender operation.
0381        */
0382       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
0383       break;
0384     }
0385 
0386     case ScoreTqReqSurrender_Post_Unblock_NA:
0387       break;
0388   }
0389 }
0390 
/*
 * Set up the test case: reset the thread queue test harness, give the two
 * blockers distinct priorities (A above B), and on SMP move a processor from
 * scheduler B to scheduler A.
 */
static void ScoreTqReqSurrender_Setup( ScoreTqReqSurrender_Context *ctx )
{
  ctx->request.arg = ctx;
  TQReset( ctx->tq_ctx );
  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_VERY_HIGH );
  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );

  #if defined(RTEMS_SMP)
  /*
   * For the mutexes with priority ceiling protocol, we need a scheduler with
   * two processors to set up the intend to block wait state.
   */
  RemoveProcessor( SCHEDULER_B_ID, 1 );
  AddProcessor( SCHEDULER_A_ID, 1 );
  #endif
}
0407 
0408 static void ScoreTqReqSurrender_Setup_Wrap( void *arg )
0409 {
0410   ScoreTqReqSurrender_Context *ctx;
0411 
0412   ctx = arg;
0413   ctx->Map.in_action_loop = false;
0414   ScoreTqReqSurrender_Setup( ctx );
0415 }
0416 
/*
 * Tear down the test case: reset the thread queue test harness and on SMP
 * move the extra processor back from scheduler A to scheduler B, undoing the
 * setup.
 */
static void ScoreTqReqSurrender_Teardown( ScoreTqReqSurrender_Context *ctx )
{
  TQReset( ctx->tq_ctx );

  #if defined(RTEMS_SMP)
  RemoveProcessor( SCHEDULER_A_ID, 1 );
  AddProcessor( SCHEDULER_B_ID, 1 );
  #endif
}
0426 
0427 static void ScoreTqReqSurrender_Teardown_Wrap( void *arg )
0428 {
0429   ScoreTqReqSurrender_Context *ctx;
0430 
0431   ctx = arg;
0432   ctx->Map.in_action_loop = false;
0433   ScoreTqReqSurrender_Teardown( ctx );
0434 }
0435 
/*
 * Perform the test action: enqueue the runner, block one or two workers on
 * the thread queue in the selected wait state, surrender the thread queue,
 * and let the workers release it again.  The exact statement order matters;
 * it is synchronized via events, scheduler event handlers, and (on SMP) a
 * two-party barrier.
 */
static void ScoreTqReqSurrender_Action( ScoreTqReqSurrender_Context *ctx )
{
  Status_Control status;

  TQResetCounter( ctx->tq_ctx );
  ctx->expected_blocker_b_counter = 0;

  /* The runner enqueues first so that the workers block afterwards */
  status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );

  if ( ctx->intend_to_block ) {
  #if defined(RTEMS_SMP)
    SMP_barrier_State state;
  #endif

    /*
     * In uniprocessor configurations, it is impossible to dequeue a thread
     * in FIFO order which is in the intend to block wait state.  Run this
     * test with just one worker.
     */
    if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
      TQSendAndWaitForExecutionStop(
        ctx->tq_ctx,
        TQ_BLOCKER_B,
        TQ_EVENT_ENQUEUE
      );
      ctx->expected_blocker_b_counter = 2;
    }


  #if defined(RTEMS_SMP)
    _SMP_barrier_Control_initialize( &ctx->barrier );
    _SMP_barrier_State_initialize( &state );
  #endif

    /*
     * SchedulerBlock() fires just before blocker A blocks and submits the
     * call within ISR request (Delay() on SMP, Surrender() otherwise), so
     * the request runs while A is still in the intend to block wait state.
     */
    T_scheduler_set_event_handler( SchedulerBlock, ctx );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );

  #if defined(RTEMS_SMP)
    /* B0 - rendezvous with Delay() before the surrender */
    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );

    Surrender( ctx );

    /* B1 - release Delay() after the surrender */
    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
  #endif
  } else {
    /* Let blocker A enqueue and reach the blocked wait state */
    TQSend(
      ctx->tq_ctx,
      TQ_BLOCKER_A,
      TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
    );
    TQSynchronizeRunner();
    TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );

    /* Let blocker B enqueue behind A and reach the blocked wait state */
    TQSend(
      ctx->tq_ctx,
      TQ_BLOCKER_B,
      TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
    );
    TQSynchronizeRunner();
    TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_B );
    ctx->expected_blocker_b_counter = 2;

    Surrender( ctx );
  }

  /* Let the dequeued blocker A release the thread queue again */
  TQSendAndWaitForExecutionStop(
    ctx->tq_ctx,
    TQ_BLOCKER_A,
    TQ_EVENT_SURRENDER
  );

  if ( ctx->expected_blocker_b_counter != 0 ) {
    TQSendAndWaitForExecutionStop(
      ctx->tq_ctx,
      TQ_BLOCKER_B,
      TQ_EVENT_SURRENDER
    );
  }
}
0518 
/*
 * Transition map entries.  The intend to block entries (indices 2 and 3) are
 * only reachable on SMP; in uniprocessor builds they are marked with Skip.
 */
static const ScoreTqReqSurrender_Entry
ScoreTqReqSurrender_Entries[] = {
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
    ScoreTqReqSurrender_Post_Unblock_Yes },
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
    ScoreTqReqSurrender_Post_Unblock_Yes },
#if !defined(RTEMS_SMP)
  { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
    ScoreTqReqSurrender_Post_Unblock_NA },
#else
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
    ScoreTqReqSurrender_Post_Unblock_No },
#endif
#if !defined(RTEMS_SMP)
  { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
    ScoreTqReqSurrender_Post_Unblock_NA },
#else
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
    ScoreTqReqSurrender_Post_Unblock_No },
#endif
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
    ScoreTqReqSurrender_Post_Unblock_No },
  { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
    ScoreTqReqSurrender_Post_Unblock_No }
};
0544 
/*
 * Maps the flattened pre-condition index (see the weights used in
 * ScoreTqReqSurrender_PopEntry()) to an index into
 * ScoreTqReqSurrender_Entries.
 */
static const uint8_t
ScoreTqReqSurrender_Map[] = {
  0, 2, 1, 3, 0, 4, 1, 5
};
0549 
0550 static size_t ScoreTqReqSurrender_Scope( void *arg, char *buf, size_t n )
0551 {
0552   ScoreTqReqSurrender_Context *ctx;
0553 
0554   ctx = arg;
0555 
0556   if ( ctx->Map.in_action_loop ) {
0557     return T_get_scope( ScoreTqReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
0558   }
0559 
0560   return 0;
0561 }
0562 
/* Test fixture tying the setup, teardown, and scope hooks to the context */
static T_fixture ScoreTqReqSurrender_Fixture = {
  .setup = ScoreTqReqSurrender_Setup_Wrap,
  .stop = NULL,
  .teardown = ScoreTqReqSurrender_Teardown_Wrap,
  .scope = ScoreTqReqSurrender_Scope,
  .initial_context = &ScoreTqReqSurrender_Instance
};
0570 
/*
 * Per-pre-condition weights used by ScoreTqReqSurrender_PopEntry() to
 * flatten the pre-condition states into a transition map index:
 * 4 * HasOwner + 2 * Discipline + 1 * WaitState.
 */
static const uint8_t ScoreTqReqSurrender_Weights[] = {
  4, 2, 1
};
0574 
0575 static void ScoreTqReqSurrender_Skip(
0576   ScoreTqReqSurrender_Context *ctx,
0577   size_t                       index
0578 )
0579 {
0580   switch ( index + 1 ) {
0581     case 1:
0582       ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_NA - 1;
0583       /* Fall through */
0584     case 2:
0585       ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_NA - 1;
0586       break;
0587   }
0588 }
0589 
0590 static inline ScoreTqReqSurrender_Entry ScoreTqReqSurrender_PopEntry(
0591   ScoreTqReqSurrender_Context *ctx
0592 )
0593 {
0594   size_t index;
0595 
0596   if ( ctx->Map.skip ) {
0597     size_t i;
0598 
0599     ctx->Map.skip = false;
0600     index = 0;
0601 
0602     for ( i = 0; i < 3; ++i ) {
0603       index += ScoreTqReqSurrender_Weights[ i ] * ctx->Map.pcs[ i ];
0604     }
0605   } else {
0606     index = ctx->Map.index;
0607   }
0608 
0609   ctx->Map.index = index + 1;
0610 
0611   return ScoreTqReqSurrender_Entries[
0612     ScoreTqReqSurrender_Map[ index ]
0613   ];
0614 }
0615 
/*
 * Run one transition map variant: prepare the pre-condition states in order,
 * bail out via ScoreTqReqSurrender_Skip() as soon as a prepare marks the
 * variant as skipped, otherwise perform the action and check both
 * post-conditions.
 */
static void ScoreTqReqSurrender_TestVariant( ScoreTqReqSurrender_Context *ctx )
{
  ScoreTqReqSurrender_Pre_HasOwner_Prepare( ctx, ctx->Map.pcs[ 0 ] );

  if ( ctx->Map.skip ) {
    ScoreTqReqSurrender_Skip( ctx, 0 );
    return;
  }

  ScoreTqReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );

  if ( ctx->Map.skip ) {
    ScoreTqReqSurrender_Skip( ctx, 1 );
    return;
  }

  ScoreTqReqSurrender_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 2 ] );
  ScoreTqReqSurrender_Action( ctx );
  ScoreTqReqSurrender_Post_Dequeue_Check( ctx, ctx->Map.entry.Post_Dequeue );
  ScoreTqReqSurrender_Post_Unblock_Check( ctx, ctx->Map.entry.Post_Unblock );
}
0637 
/* Fixture node pushed by ScoreTqReqSurrender_Run() */
static T_fixture_node ScoreTqReqSurrender_Node;

/* Remark recorded once a run of this test case completed */
static T_remark ScoreTqReqSurrender_Remark = {
  .next = NULL,
  .remark = "ScoreTqReqSurrender"
};
0644 
/*
 * Entry point of the test case.  Iterates over the full cross product of the
 * HasOwner, Discipline, and WaitState pre-condition states, pops the matching
 * transition map entry for each combination, and runs every applicable
 * variant under the fixture.
 */
void ScoreTqReqSurrender_Run( TQContext *tq_ctx )
{
  ScoreTqReqSurrender_Context *ctx;

  /* Store the thread queue context before the fixture setup runs */
  ctx = &ScoreTqReqSurrender_Instance;
  ctx->tq_ctx = tq_ctx;

  ctx = T_push_fixture(
    &ScoreTqReqSurrender_Node,
    &ScoreTqReqSurrender_Fixture
  );
  ctx->Map.in_action_loop = true;
  ctx->Map.index = 0;
  ctx->Map.skip = false;

  for (
    ctx->Map.pcs[ 0 ] = ScoreTqReqSurrender_Pre_HasOwner_Yes;
    ctx->Map.pcs[ 0 ] < ScoreTqReqSurrender_Pre_HasOwner_NA;
    ++ctx->Map.pcs[ 0 ]
  ) {
    for (
      ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_FIFO;
      ctx->Map.pcs[ 1 ] < ScoreTqReqSurrender_Pre_Discipline_NA;
      ++ctx->Map.pcs[ 1 ]
    ) {
      for (
        ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_Blocked;
        ctx->Map.pcs[ 2 ] < ScoreTqReqSurrender_Pre_WaitState_NA;
        ++ctx->Map.pcs[ 2 ]
      ) {
        ctx->Map.entry = ScoreTqReqSurrender_PopEntry( ctx );

        /* Entries marked Skip are not applicable in this build */
        if ( ctx->Map.entry.Skip ) {
          continue;
        }

        ScoreTqReqSurrender_TestVariant( ctx );
      }
    }
  }

  T_add_remark( &ScoreTqReqSurrender_Remark );
  T_pop_fixture();
}
0689 
0690 /** @} */