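/**
 * @file
 *
 * @brief Validates the Newlib internal locks provided by <sys/lock.h>.
 *
 * The test case drives the self-contained struct _Mutex_Control and
 * struct _Mutex_recursive_Control objects through the generic mutex seize
 * try, mutex seize wait, mutex surrender, and thread queue priority
 * inheritance timeout test runners.
 */
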
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/lock.h>

#include "tr-mtx-seize-try.h"
#include "tr-mtx-seize-wait.h"
#include "tr-mtx-surrender.h"
#include "tr-tq-timeout-priority-inherit.h"
#include "tx-thread-queue.h"

#include <rtems/test.h>
typedef struct {
  /**
   * @brief This member contains the thread queue mutex test context.
   */
  TQMtxContext tq_mtx_ctx;
} NewlibValSysLock_Context;

static NewlibValSysLock_Context
  NewlibValSysLock_Instance;

static Status_Control Enqueue( TQContext *ctx, TQWait wait )
{
  /* An absolute timeout far in the future, so a timed seize cannot expire
     during the test. */
  const struct timespec abstime = {
    .tv_sec = INT64_MAX,
    .tv_nsec = 0
  };
  int eno;

  switch ( wait ) {
    case TQ_NO_WAIT:
      eno = _Mutex_Try_acquire( ctx->thread_queue_object );
      break;
    case TQ_WAIT_FOREVER:
      _Mutex_Acquire( ctx->thread_queue_object );
      eno = 0;
      break;
    case TQ_WAIT_TIMED:
      eno = _Mutex_Acquire_timed( ctx->thread_queue_object, &abstime );
      break;
    default:
      T_unreachable();
      break;
  }

  return STATUS_BUILD( 0, eno );
}
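
/*
 * For reference, a minimal usage sketch of the non-recursive <sys/lock.h>
 * mutex operations exercised above (all names appear in this file; the
 * sketch assumes no other thread owns the mutex):
 *
 *   struct _Mutex_Control mtx;
 *
 *   _Mutex_Initialize( &mtx );
 *
 *   if ( _Mutex_Try_acquire( &mtx ) == 0 ) {
 *     // seized without blocking
 *     _Mutex_Release( &mtx );
 *   }
 *
 *   _Mutex_Acquire( &mtx );  // blocks until the mutex is available
 *   _Mutex_Release( &mtx );
 *
 *   _Mutex_Destroy( &mtx );
 */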

static Status_Control Surrender( TQContext *ctx )
{
  _Mutex_Release( ctx->thread_queue_object );

  return STATUS_SUCCESSFUL;
}

static rtems_tcb *GetOwner( TQContext *ctx )
{
  const struct _Mutex_Control *mutex;

  mutex = ctx->thread_queue_object;

  /* Peek at the thread queue embedded in the mutex to get the owner. */
  return mutex->_Queue._owner;
}

static Status_Control RecursiveEnqueue( TQContext *ctx, TQWait wait )
{
  /* See Enqueue(): a far-future timeout which cannot expire during the
     test. */
  const struct timespec abstime = {
    .tv_sec = INT64_MAX,
    .tv_nsec = 0
  };
  int eno;

  switch ( wait ) {
    case TQ_NO_WAIT:
      eno = _Mutex_recursive_Try_acquire( ctx->thread_queue_object );
      break;
    case TQ_WAIT_FOREVER:
      _Mutex_recursive_Acquire( ctx->thread_queue_object );
      eno = 0;
      break;
    case TQ_WAIT_TIMED:
      eno = _Mutex_recursive_Acquire_timed(
        ctx->thread_queue_object,
        &abstime
      );
      break;
    default:
      T_unreachable();
      break;
  }

  return STATUS_BUILD( 0, eno );
}

static Status_Control RecursiveSurrender( TQContext *ctx )
{
  _Mutex_recursive_Release( ctx->thread_queue_object );

  return STATUS_SUCCESSFUL;
}

static rtems_tcb *RecursiveGetOwner( TQContext *ctx )
{
  const struct _Mutex_recursive_Control *mutex;

  mutex = ctx->thread_queue_object;

  /* The recursive mutex embeds a plain mutex which embeds the queue. */
  return mutex->_Mutex._Queue._owner;
}
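
/*
 * Unlike struct _Mutex_Control, the recursive variant may be seized again
 * by its current owner; each nested acquire must be paired with a release.
 * A minimal sketch using only calls that appear in this file:
 *
 *   struct _Mutex_recursive_Control mtx;
 *
 *   _Mutex_recursive_Initialize( &mtx );
 *   _Mutex_recursive_Acquire( &mtx );
 *   _Mutex_recursive_Acquire( &mtx );  // nested seize by the owner succeeds
 *   _Mutex_recursive_Release( &mtx );
 *   _Mutex_recursive_Release( &mtx );  // last release hands over the mutex
 *   _Mutex_recursive_Destroy( &mtx );
 */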

static void NewlibValSysLock_Setup( NewlibValSysLock_Context *ctx )
{
  memset( ctx, 0, sizeof( *ctx ) );

  /* Configure the generic mutex test context to match the Newlib lock
     semantics: blocking enqueue, priority discipline, fatal deadlock
     detection, POSIX status codes, priority inheritance, and no owner check
     on release. */
  ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
  ctx->tq_mtx_ctx.base.discipline = TQ_PRIORITY;
  ctx->tq_mtx_ctx.base.deadlock = TQ_DEADLOCK_FATAL;
  ctx->tq_mtx_ctx.base.convert_status = TQConvertStatusPOSIX;
  ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_INHERIT;
  ctx->tq_mtx_ctx.owner_check = TQ_MTX_NO_OWNER_CHECK;
  ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
  TQInitialize( &ctx->tq_mtx_ctx.base );
}

static void NewlibValSysLock_Setup_Wrap( void *arg )
{
  NewlibValSysLock_Context *ctx;

  ctx = arg;
  NewlibValSysLock_Setup( ctx );
}

static void NewlibValSysLock_Teardown( NewlibValSysLock_Context *ctx )
{
  TQDestroy( &ctx->tq_mtx_ctx.base );
  RestoreRunnerPriority();
}

static void NewlibValSysLock_Teardown_Wrap( void *arg )
{
  NewlibValSysLock_Context *ctx;

  ctx = arg;
  NewlibValSysLock_Teardown( ctx );
}

static T_fixture NewlibValSysLock_Fixture = {
  .setup = NewlibValSysLock_Setup_Wrap,
  .stop = NULL,
  .teardown = NewlibValSysLock_Teardown_Wrap,
  .scope = NULL,
  .initial_context = &NewlibValSysLock_Instance
};
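
/*
 * The fixture runs NewlibValSysLock_Setup() before and
 * NewlibValSysLock_Teardown() after the test case body below, with
 * NewlibValSysLock_Instance passed as the fixture context.
 */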

/**
 * @brief Create a self-contained mutex and validate the mutex operations.
 */
static void NewlibValSysLock_Action_0( NewlibValSysLock_Context *ctx )
{
  const struct timespec invalid_abstime = {
    .tv_sec = -1,
    .tv_nsec = -1
  };
  int eno;
  struct _Mutex_Control mutex;

  _Mutex_Initialize( &mutex );

  ctx->tq_mtx_ctx.base.thread_queue_object = &mutex;
  ctx->tq_mtx_ctx.base.enqueue_prepare = TQEnqueuePrepareDefault;
  ctx->tq_mtx_ctx.base.enqueue_done = TQEnqueueDoneDefault;
  ctx->tq_mtx_ctx.base.enqueue = Enqueue;
  ctx->tq_mtx_ctx.base.surrender = Surrender;
  ctx->tq_mtx_ctx.base.get_owner = GetOwner;

  /*
   * Validate the non-blocking mutex seize; a recursive seize is not
   * available for the non-recursive mutex.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_NO_WAIT;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_UNAVAILABLE;
  ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );

  /*
   * Validate the mutex seize with a timeout; a recursive seize results in a
   * deadlock.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_TIMED;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
  ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );

  /*
   * While the mutex is owned by a helper thread, check that a timed seize
   * with an invalid absolute time fails with EINVAL.
   */
  TQSetScheduler(
    &ctx->tq_mtx_ctx.base,
    TQ_HELPER_A,
    SCHEDULER_A_ID,
    PRIO_HIGH
  );
  TQSend( &ctx->tq_mtx_ctx.base, TQ_HELPER_A, TQ_EVENT_ENQUEUE );
  eno = _Mutex_Acquire_timed( &mutex, &invalid_abstime );
  T_eq_int( eno, EINVAL );
  TQSend( &ctx->tq_mtx_ctx.base, TQ_HELPER_A, TQ_EVENT_SURRENDER );

  /*
   * Validate the thread queue timeout behaviour with the priority
   * inheritance protocol.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
  ScoreTqReqTimeoutPriorityInherit_Run( &ctx->tq_mtx_ctx.base );

  /*
   * Validate the blocking mutex seize.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
  ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );

  /*
   * Validate the mutex surrender.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
  ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );

  _Mutex_Destroy( &mutex );
}

/**
 * @brief Create a self-contained recursive mutex and validate the mutex
 *   operations.
 */
static void NewlibValSysLock_Action_1( NewlibValSysLock_Context *ctx )
{
  const struct timespec invalid_abstime = {
    .tv_sec = -1,
    .tv_nsec = -1
  };
  int eno;
  struct _Mutex_recursive_Control mutex;

  _Mutex_recursive_Initialize( &mutex );

  ctx->tq_mtx_ctx.base.thread_queue_object = &mutex;
  ctx->tq_mtx_ctx.base.enqueue_prepare = TQEnqueuePrepareDefault;
  ctx->tq_mtx_ctx.base.enqueue_done = TQEnqueueDoneDefault;
  ctx->tq_mtx_ctx.base.enqueue = RecursiveEnqueue;
  ctx->tq_mtx_ctx.base.surrender = RecursiveSurrender;
  ctx->tq_mtx_ctx.base.get_owner = RecursiveGetOwner;

  /*
   * Validate the non-blocking mutex seize; a recursive seize by the owner
   * is allowed.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_NO_WAIT;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
  ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );

  /*
   * Validate the mutex seize with a timeout.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_TIMED;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
  ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );

  /*
   * While the mutex is owned by a helper thread, check that a timed seize
   * with an invalid absolute time fails with EINVAL.
   */
  TQSetScheduler(
    &ctx->tq_mtx_ctx.base,
    TQ_HELPER_A,
    SCHEDULER_A_ID,
    PRIO_HIGH
  );
  TQSend( &ctx->tq_mtx_ctx.base, TQ_HELPER_A, TQ_EVENT_ENQUEUE );
  eno = _Mutex_recursive_Acquire_timed( &mutex, &invalid_abstime );
  T_eq_int( eno, EINVAL );
  TQSend( &ctx->tq_mtx_ctx.base, TQ_HELPER_A, TQ_EVENT_SURRENDER );

  /*
   * Validate the thread queue timeout behaviour with the priority
   * inheritance protocol.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
  ScoreTqReqTimeoutPriorityInherit_Run( &ctx->tq_mtx_ctx.base );

  /*
   * Validate the blocking mutex seize.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
  ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );

  /*
   * Validate the mutex surrender.
   */
  ctx->tq_mtx_ctx.base.wait = TQ_WAIT_FOREVER;
  ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
  ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );

  _Mutex_recursive_Destroy( &mutex );
}

T_TEST_CASE_FIXTURE( NewlibValSysLock, &NewlibValSysLock_Fixture )
{
  NewlibValSysLock_Context *ctx;

  ctx = T_fixture_context();

  NewlibValSysLock_Action_0( ctx );
  NewlibValSysLock_Action_1( ctx );
}
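
/*
 * T_TEST_CASE_FIXTURE() registers the test case with the RTEMS Test
 * Framework, so the two actions above run automatically under the fixture;
 * no manual invocation is required.
 */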