/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup ScoreTqReqEnqueueDeadlock
 */

/*
 * Copyright (C) 2021 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is part of the RTEMS quality process and was automatically
 * generated.  If you find something that needs to be fixed or
 * worded better please post a report or patch to an RTEMS mailing list
 * or raise a bug report:
 *
 * https://www.rtems.org/bugs.html
 *
 * For information on updating and regenerating please refer to the How-To
 * section in the Software Requirements Engineering chapter of the
 * RTEMS Software Engineering manual.  The manual is provided as a part of
 * a release.  For development sources please refer to the online
 * documentation at:
 *
 * https://docs.rtems.org
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "tr-tq-enqueue-deadlock.h"

#include <rtems/test.h>

/**
 * @defgroup ScoreTqReqEnqueueDeadlock spec:/score/tq/req/enqueue-deadlock
 *
 * @ingroup TestsuitesValidationNoClock0
 *
 * @{
 */

/**
 * @brief Transition map entry.
 *
 * Each entry records whether the variant shall be skipped, whether a
 * pre-condition is not applicable, and the expected post-condition state,
 * packed into bit-fields.
 */
typedef struct {
  uint8_t Skip : 1;
  uint8_t Pre_Notification_NA : 1;
  uint8_t Pre_Deadlock_NA : 1;
  uint8_t Post_Result : 2;
} ScoreTqReqEnqueueDeadlock_Entry;

/**
 * @brief Test context for spec:/score/tq/req/enqueue-deadlock test case.
 */
typedef struct {
  /**
   * @brief If this member is true, then more than one mutex shall be used for
   *   the deadlock scenario.
   */
  bool more;

  /**
   * @brief This member contains a copy of the corresponding
   *   ScoreTqReqEnqueueDeadlock_Run() parameter.
   */
  TQContext *tq_ctx;

  struct {
    /**
     * @brief This member defines the pre-condition states for the next action.
     */
    size_t pcs[ 2 ];

    /**
     * @brief If this member is true, then the test action loop is executed.
     */
    bool in_action_loop;

    /**
     * @brief This member contains the next transition map index.
     */
    size_t index;

    /**
     * @brief This member contains the current transition map entry.
     */
    ScoreTqReqEnqueueDeadlock_Entry entry;

    /**
     * @brief If this member is true, then the current transition variant
     *   should be skipped.
     */
    bool skip;
  } Map;
} ScoreTqReqEnqueueDeadlock_Context;

static ScoreTqReqEnqueueDeadlock_Context
  ScoreTqReqEnqueueDeadlock_Instance;

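/*
 * State names of the pre-conditions.  The scope handler below uses these
 * tables together with the current pre-condition state indices to describe
 * the executed variant in the test output.
 */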
static const char * const ScoreTqReqEnqueueDeadlock_PreDesc_Notification[] = {
  "Status",
  "Fatal",
  "NA"
};

static const char * const ScoreTqReqEnqueueDeadlock_PreDesc_Deadlock[] = {
  "One",
  "More",
  "NA"
};

static const char * const * const ScoreTqReqEnqueueDeadlock_PreDesc[] = {
  ScoreTqReqEnqueueDeadlock_PreDesc_Notification,
  ScoreTqReqEnqueueDeadlock_PreDesc_Deadlock,
  NULL
};

static void ScoreTqReqEnqueueDeadlock_Pre_Notification_Prepare(
  ScoreTqReqEnqueueDeadlock_Context         *ctx,
  ScoreTqReqEnqueueDeadlock_Pre_Notification state
)
{
  switch ( state ) {
    case ScoreTqReqEnqueueDeadlock_Pre_Notification_Status: {
      /*
       * Where a detected deadlock results in a return with a status code.
       */
      if ( ctx->tq_ctx->deadlock != TQ_DEADLOCK_STATUS ) {
        ctx->Map.skip = true;
      }
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Pre_Notification_Fatal: {
      /*
       * Where a detected deadlock results in a fatal error.
       */
      if ( ctx->tq_ctx->deadlock != TQ_DEADLOCK_FATAL ) {
        ctx->Map.skip = true;
      }
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Pre_Notification_NA:
      break;
  }
}

static void ScoreTqReqEnqueueDeadlock_Pre_Deadlock_Prepare(
  ScoreTqReqEnqueueDeadlock_Context     *ctx,
  ScoreTqReqEnqueueDeadlock_Pre_Deadlock state
)
{
  switch ( state ) {
    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_One: {
      /*
       * While the owner of the thread queue is enqueued on another thread
       * queue owned by the calling thread.
       */
      ctx->more = false;
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_More: {
      /*
       * While the owner of the thread queue is enqueued on another thread
       * queue owned by a thread other than the calling thread, and so on,
       * while the owner of the last thread queue of this dependency chain is
       * enqueued on a thread queue owned by the calling thread.
       */
      ctx->more = true;
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA:
      break;
  }
}

static void ScoreTqReqEnqueueDeadlock_Post_Result_Check(
  ScoreTqReqEnqueueDeadlock_Context    *ctx,
  ScoreTqReqEnqueueDeadlock_Post_Result state
)
{
  switch ( state ) {
    case ScoreTqReqEnqueueDeadlock_Post_Result_Status: {
      /*
       * The return status of the directive call shall be derived from
       * STATUS_DEADLOCK.
       */
      /* Checked by action */
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Post_Result_Fatal: {
      /*
       * The system shall terminate with the INTERNAL_ERROR_CORE fatal source
       * and the INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal code.
       */
      /* Checked by action */
      break;
    }

    case ScoreTqReqEnqueueDeadlock_Post_Result_NA:
      break;
  }
}

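/*
 * Overview of the deadlock cycles constructed by the action below (derived
 * from the code):
 *
 * One: the runner owns MUTEX_C, BLOCKER_A owns the thread queue under test
 * and blocks on MUTEX_C, then the runner enqueues on the thread queue.
 *
 * More: the runner owns MUTEX_C, BLOCKER_B owns MUTEX_A and blocks on
 * MUTEX_C, BLOCKER_C owns MUTEX_B and blocks on MUTEX_A, BLOCKER_A owns the
 * thread queue under test and blocks on MUTEX_B, then the runner enqueues on
 * the thread queue.
 */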
static void ScoreTqReqEnqueueDeadlock_Action(
  ScoreTqReqEnqueueDeadlock_Context *ctx
)
{
  Status_Control status;

  /*
   * Place BLOCKER_A on scheduler B for the sticky enqueue variant, otherwise
   * on scheduler A with a very high priority.
   */
  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
  } else {
    TQSetScheduler(
      ctx->tq_ctx,
      TQ_BLOCKER_A,
      SCHEDULER_A_ID,
      PRIO_VERY_HIGH
    );
  }

  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_A_ID, PRIO_HIGH );
  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_A_ID, PRIO_HIGH );

  /*
   * The runner obtains MUTEX_C and makes BLOCKER_A the owner of the thread
   * queue under test.
   */
  TQSortMutexesByID( ctx->tq_ctx );
  TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_C );
  TQSendAndWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );

  if ( ctx->more ) {
    /* Build the longer dependency chain through BLOCKER_B and BLOCKER_C. */
    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_C_OBTAIN );
    Yield();
    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_B_OBTAIN );
    Yield();
    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
    Yield();
    TQSendAndWaitForExecutionStop(
      ctx->tq_ctx,
      TQ_BLOCKER_A,
      TQ_EVENT_MUTEX_B_OBTAIN
    );
  } else {
    /* BLOCKER_A blocks directly on the mutex owned by the runner. */
    TQSendAndWaitForExecutionStop(
      ctx->tq_ctx,
      TQ_BLOCKER_A,
      TQ_EVENT_MUTEX_C_OBTAIN
    );
  }

  /*
   * Close the cycle by enqueueing the runner on the thread queue and check
   * the expected deadlock notification.
   */
  if ( ctx->tq_ctx->deadlock == TQ_DEADLOCK_FATAL ) {
    status = TQEnqueueFatal( ctx->tq_ctx );
    T_eq_int( status, STATUS_DEADLOCK );
  } else {
    status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
    T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_DEADLOCK ) );
  }

  /* Resolve the setup: release the mutexes obtained above. */
  TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_C );

  if ( ctx->more ) {
    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_C_RELEASE );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_B_RELEASE );
    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_B_RELEASE );
  } else {
    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_C_RELEASE );
  }

  /*
   * Let BLOCKER_A surrender the thread queue; in the sticky variant,
   * synchronize with it and move it back to scheduler A.
   */
  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
    TQSend(
      ctx->tq_ctx,
      TQ_BLOCKER_A,
      TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
    );
    TQSynchronizeRunner();
    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_A_ID, PRIO_HIGH );
  } else {
    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
  }
}

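/*
 * Transition map: the flat index is Notification * 2 + Deadlock.  The first
 * two map values select the Status entry and the last two select the Fatal
 * entry, so the expected result depends only on the deadlock notification
 * method and not on the length of the deadlock chain.
 */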
static const ScoreTqReqEnqueueDeadlock_Entry
ScoreTqReqEnqueueDeadlock_Entries[] = {
  { 0, 0, 0, ScoreTqReqEnqueueDeadlock_Post_Result_Status },
  { 0, 0, 0, ScoreTqReqEnqueueDeadlock_Post_Result_Fatal }
};

static const uint8_t
ScoreTqReqEnqueueDeadlock_Map[] = {
  0, 0, 1, 1
};

static size_t ScoreTqReqEnqueueDeadlock_Scope( void *arg, char *buf, size_t n )
{
  ScoreTqReqEnqueueDeadlock_Context *ctx;

  ctx = arg;

  if ( ctx->Map.in_action_loop ) {
    return T_get_scope(
      ScoreTqReqEnqueueDeadlock_PreDesc,
      buf,
      n,
      ctx->Map.pcs
    );
  }

  return 0;
}

static T_fixture ScoreTqReqEnqueueDeadlock_Fixture = {
  .setup = NULL,
  .stop = NULL,
  .teardown = NULL,
  .scope = ScoreTqReqEnqueueDeadlock_Scope,
  .initial_context = &ScoreTqReqEnqueueDeadlock_Instance
};

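/*
 * Weight of each pre-condition, i.e. the product of the state counts of all
 * following pre-conditions.  ScoreTqReqEnqueueDeadlock_PopEntry() uses these
 * weights to recompute the flat transition map index after a skipped
 * variant, so the map stays aligned with the pre-condition iteration.
 */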
static const uint8_t ScoreTqReqEnqueueDeadlock_Weights[] = {
  2, 1
};

static void ScoreTqReqEnqueueDeadlock_Skip(
  ScoreTqReqEnqueueDeadlock_Context *ctx,
  size_t                             index
)
{
  switch ( index + 1 ) {
    case 1:
      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA - 1;
      break;
  }
}

static inline ScoreTqReqEnqueueDeadlock_Entry
ScoreTqReqEnqueueDeadlock_PopEntry( ScoreTqReqEnqueueDeadlock_Context *ctx )
{
  size_t index;

  if ( ctx->Map.skip ) {
    size_t i;

    ctx->Map.skip = false;
    index = 0;

    for ( i = 0; i < 2; ++i ) {
      index += ScoreTqReqEnqueueDeadlock_Weights[ i ] * ctx->Map.pcs[ i ];
    }
  } else {
    index = ctx->Map.index;
  }

  ctx->Map.index = index + 1;

  return ScoreTqReqEnqueueDeadlock_Entries[
    ScoreTqReqEnqueueDeadlock_Map[ index ]
  ];
}


static void ScoreTqReqEnqueueDeadlock_TestVariant(
  ScoreTqReqEnqueueDeadlock_Context *ctx
)
{
  ScoreTqReqEnqueueDeadlock_Pre_Notification_Prepare( ctx, ctx->Map.pcs[ 0 ] );

  if ( ctx->Map.skip ) {
    ScoreTqReqEnqueueDeadlock_Skip( ctx, 0 );
    return;
  }

  ScoreTqReqEnqueueDeadlock_Pre_Deadlock_Prepare( ctx, ctx->Map.pcs[ 1 ] );
  ScoreTqReqEnqueueDeadlock_Action( ctx );
  ScoreTqReqEnqueueDeadlock_Post_Result_Check(
    ctx,
    ctx->Map.entry.Post_Result
  );
}

static T_fixture_node ScoreTqReqEnqueueDeadlock_Node;

static T_remark ScoreTqReqEnqueueDeadlock_Remark = {
  .next = NULL,
  .remark = "ScoreTqReqEnqueueDeadlock"
};


void ScoreTqReqEnqueueDeadlock_Run( TQContext *tq_ctx )
{
  ScoreTqReqEnqueueDeadlock_Context *ctx;

  ctx = &ScoreTqReqEnqueueDeadlock_Instance;
  ctx->tq_ctx = tq_ctx;

  ctx = T_push_fixture(
    &ScoreTqReqEnqueueDeadlock_Node,
    &ScoreTqReqEnqueueDeadlock_Fixture
  );
  ctx->Map.in_action_loop = true;
  ctx->Map.index = 0;
  ctx->Map.skip = false;

  for (
    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueueDeadlock_Pre_Notification_Status;
    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueueDeadlock_Pre_Notification_NA;
    ++ctx->Map.pcs[ 0 ]
  ) {
    for (
      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueDeadlock_Pre_Deadlock_One;
      ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA;
      ++ctx->Map.pcs[ 1 ]
    ) {
      ctx->Map.entry = ScoreTqReqEnqueueDeadlock_PopEntry( ctx );
      ScoreTqReqEnqueueDeadlock_TestVariant( ctx );
    }
  }

  T_add_remark( &ScoreTqReqEnqueueDeadlock_Remark );
  T_pop_fixture();
}

/** @} */
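
/*
 * Illustrative usage sketch (an assumption, not part of the generated file):
 * a validation test case for an object with a deadlock-detecting thread
 * queue prepares a TQContext -- at least the deadlock member
 * (TQ_DEADLOCK_STATUS or TQ_DEADLOCK_FATAL) and the enqueue_variant member --
 * and then calls
 *
 *   ScoreTqReqEnqueueDeadlock_Run( &tq_ctx );
 *
 * The worker tasks and mutexes used by the action above are provided by the
 * test suite through the TQContext.
 */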