#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "tx-thread-queue.h"
#include "tx-support.h"
#include "ts-config.h"

#include <rtems/score/threadimpl.h>
#include <rtems/rtems/semimpl.h>

#include <setjmp.h>
#include <string.h>
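/*
 * Send events to the worker task.  In SMP configurations, the event-received
 * flag is cleared first so that TQWaitForEventsReceived() can observe when
 * the worker picked up the events.
 */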
void TQSend(
  TQContext      *ctx,
  TQWorkerKind    worker,
  rtems_event_set events
)
{
#if defined( RTEMS_SMP )
  ctx->event_received[ worker ] = false;
#endif

  SendEvents( ctx->worker_id[ worker ], events );
}

void TQWaitForEventsReceived( const TQContext *ctx, TQWorkerKind worker )
{
#if defined( RTEMS_SMP )
  while ( !ctx->event_received[ worker ] ) {
    /* Wait */
  }
#endif
}

void TQWaitForExecutionStop( const TQContext *ctx, TQWorkerKind worker )
{
#if defined( RTEMS_SMP )
  WaitForExecutionStop( ctx->worker_id[ worker ] );
#endif
}

void TQSendAndWaitForExecutionStop(
  TQContext      *ctx,
  TQWorkerKind    worker,
  rtems_event_set events
)
{
  TQSend( ctx, worker, events );

#if defined( RTEMS_SMP )
  TQWaitForEventsReceived( ctx, worker );
  WaitForExecutionStop( ctx->worker_id[ worker ] );
#endif
}
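/*
 * Busy wait until the worker thread reached the intend-to-block wait state
 * for the thread queue object.
 */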
void TQWaitForIntendToBlock( const TQContext *ctx, TQWorkerKind worker )
{
  const rtems_tcb  *thread;
  Thread_Wait_flags intend_to_block;

  thread = ctx->worker_tcb[ worker ];
  intend_to_block = THREAD_WAIT_CLASS_OBJECT |
    THREAD_WAIT_STATE_INTEND_TO_BLOCK;

  while ( _Thread_Wait_flags_get_acquire( thread ) != intend_to_block ) {
    /* Wait */
  }
}

void TQSendAndWaitForIntendToBlock(
  TQContext      *ctx,
  TQWorkerKind    worker,
  rtems_event_set events
)
{
  TQSend( ctx, worker, events );

#if defined( RTEMS_SMP )
  TQWaitForEventsReceived( ctx, worker );
  TQWaitForIntendToBlock( ctx, worker );
#endif
}
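/*
 * Send events to the worker and, in SMP configurations, busy wait until the
 * worker either stopped execution or reached the intend-to-block wait state.
 */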
void TQSendAndWaitForExecutionStopOrIntendToBlock(
  TQContext      *ctx,
  TQWorkerKind    worker,
  rtems_event_set events
)
{
#if defined( RTEMS_SMP )
  const rtems_tcb  *thread;
  Thread_Wait_flags intend_to_block;
#endif

  TQSend( ctx, worker, events );

#if defined( RTEMS_SMP )
  TQWaitForEventsReceived( ctx, worker );
  thread = ctx->worker_tcb[ worker ];
  intend_to_block = THREAD_WAIT_CLASS_OBJECT |
    THREAD_WAIT_STATE_INTEND_TO_BLOCK;

  while (
    _Thread_Is_executing_on_a_processor( thread ) &&
    _Thread_Wait_flags_get_acquire( thread ) != intend_to_block
  ) {
    /* Wait */
  }
#endif
}

void TQSendAndSynchronizeRunner(
  TQContext      *ctx,
  TQWorkerKind    worker,
  rtems_event_set events
)
{
  T_quiet_eq_u32( QueryPendingEvents() & TQ_EVENT_RUNNER_SYNC, 0 );
  TQSend( ctx, worker, events | TQ_EVENT_RUNNER_SYNC );
  TQSynchronizeRunner();
}

void TQClearDone( TQContext *ctx, TQWorkerKind worker )
{
  ctx->done[ worker ] = false;
}

void TQWaitForDone( const TQContext *ctx, TQWorkerKind worker )
{
  while ( !ctx->done[ worker ] ) {
    /* Wait */
  }
}

void TQSynchronizeRunner( void )
{
  ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC );
}

void TQSynchronizeRunner2( void )
{
  ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC | TQ_EVENT_RUNNER_SYNC_2 );
}

void TQResetCounter( TQContext *ctx )
{
  ctx->counter = 0;
  memset( &ctx->worker_counter, 0, sizeof( ctx->worker_counter ) );
}

uint32_t TQGetCounter( const TQContext *ctx )
{
  return ctx->counter;
}

uint32_t TQGetWorkerCounter( const TQContext *ctx, TQWorkerKind worker )
{
  return ctx->worker_counter[ worker ];
}

void TQMutexObtain( const TQContext *ctx, TQMutex mutex )
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(
    ctx->mutex_id[ mutex ],
    RTEMS_WAIT,
    RTEMS_NO_TIMEOUT
  );
  T_rsc_success( sc );
}

void TQMutexRelease( const TQContext *ctx, TQMutex mutex )
{
  rtems_status_code sc;

  sc = rtems_semaphore_release( ctx->mutex_id[ mutex ] );
  T_rsc_success( sc );
}

void TQSetPriority(
  const TQContext *ctx,
  TQWorkerKind     worker,
  Priority         priority
)
{
  SetPriority( ctx->worker_id[ worker ], priority );
}

Priority TQGetPriority( const TQContext *ctx, TQWorkerKind worker )
{
  return GetPriority( ctx->worker_id[ worker ] );
}
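/*
 * Set the scheduler and priority of the worker task.  In non-SMP
 * configurations, only the priority is set.
 */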
void TQSetScheduler(
  const TQContext *ctx,
  TQWorkerKind     worker,
  rtems_id         scheduler_id,
  Priority         priority
)
{
#if defined( RTEMS_SMP )
  rtems_status_code sc;

  sc = rtems_task_set_scheduler(
    ctx->worker_id[ worker ],
    scheduler_id,
    priority
  );
  T_rsc_success( sc );
#else
  (void) scheduler_id;
  SetPriority( ctx->worker_id[ worker ], priority );
#endif
}

static void Count( TQContext *ctx, TQWorkerKind worker )
{
  unsigned int counter;

  counter = _Atomic_Fetch_add_uint( &ctx->counter, 1, ATOMIC_ORDER_RELAXED );
  ctx->worker_counter[ worker ] = counter + 1;
}

static void Enqueue( TQContext *ctx, TQWorkerKind worker, TQWait wait )
{
  ctx->status[ worker ] = TQEnqueue( ctx, wait );
  Count( ctx, worker );
}
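/*
 * Fatal error handler used to catch thread queue deadlocks.  It checks the
 * fatal source and code, removes itself, re-enables interrupts, and jumps
 * back to the point right before the enqueue operation.
 */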
static void ThreadQueueDeadlock(
  rtems_fatal_source source,
  rtems_fatal_code   code,
  void              *arg
)
{
  TQContext *ctx;

  ctx = arg;
  T_eq_int( source, INTERNAL_ERROR_CORE );
  T_eq_int( code, INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
  SetFatalHandler( NULL, NULL );
  _ISR_Set_level( 0 );
  longjmp( ctx->before_enqueue, 1 );
}
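/*
 * Body of all worker tasks.  A worker blocks until it receives events and
 * then carries out the actions requested by the event set, for example
 * enqueue, surrender, flush, timeout, mutex obtain and release, or runner
 * synchronization.
 */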
static void Worker( rtems_task_argument arg, TQWorkerKind worker )
{
  TQContext *ctx;

  ctx = (TQContext *) arg;

  while ( true ) {
    rtems_event_set events;

    events = ReceiveAnyEvents();
    ctx->event_received[ worker ] = true;

    if ( ( events & TQ_EVENT_HELPER_A_SYNC ) != 0 ) {
      SendEvents( ctx->worker_id[ TQ_HELPER_A ], TQ_EVENT_RUNNER_SYNC );
    }

    if ( ( events & TQ_EVENT_HELPER_B_SYNC ) != 0 ) {
      SendEvents( ctx->worker_id[ TQ_HELPER_B ], TQ_EVENT_RUNNER_SYNC );
    }

    if ( ( events & TQ_EVENT_SCHEDULER_RECORD_START ) != 0 ) {
      TQSchedulerRecordStart( ctx );
    }

    if ( ( events & TQ_EVENT_ENQUEUE_PREPARE ) != 0 ) {
      TQEnqueuePrepare( ctx );
    }

    if ( ( events & TQ_EVENT_ENQUEUE ) != 0 ) {
      Enqueue( ctx, worker, ctx->wait );
    }

    if ( ( events & TQ_EVENT_ENQUEUE_TIMED ) != 0 ) {
      Enqueue( ctx, worker, TQ_WAIT_TIMED );
    }

    if ( ( events & TQ_EVENT_ENQUEUE_FATAL ) != 0 ) {
      SetFatalHandler( ThreadQueueDeadlock, ctx );

      if ( setjmp( ctx->before_enqueue ) == 0 ) {
        ctx->status[ worker ] = STATUS_MINUS_ONE;
        Enqueue( ctx, worker, ctx->wait );
      } else {
        ctx->status[ worker ] = STATUS_DEADLOCK;
      }
    }

    if ( ( events & TQ_EVENT_TIMEOUT ) != 0 ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Timeout( &ctx->worker_tcb[ worker ]->Timer.Watchdog );
      _Thread_Dispatch_direct( cpu_self );
    }

    if ( ( events & TQ_EVENT_FLUSH_ALL ) != 0 ) {
      TQFlush( ctx, true );
    }

    if ( ( events & TQ_EVENT_FLUSH_PARTIAL ) != 0 ) {
      TQFlush( ctx, false );
    }

    if ( ( events & TQ_EVENT_ENQUEUE_DONE ) != 0 ) {
      TQEnqueueDone( ctx );
    }

    if ( ( events & TQ_EVENT_SURRENDER ) != 0 ) {
      Status_Control status;

      status = TQSurrender( ctx );
      T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
    }

    if ( ( events & TQ_EVENT_MUTEX_A_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_A );
    }

    if ( ( events & TQ_EVENT_MUTEX_A_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_A );
    }

    if ( ( events & TQ_EVENT_MUTEX_B_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_B );
    }

    if ( ( events & TQ_EVENT_MUTEX_B_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_B );
    }

    if ( ( events & TQ_EVENT_MUTEX_C_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_C );
    }

    if ( ( events & TQ_EVENT_MUTEX_C_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_C );
    }

    if ( ( events & TQ_EVENT_MUTEX_D_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_D );
    }

    if ( ( events & TQ_EVENT_MUTEX_D_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_D );
    }

    if ( ( events & TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_NO_PROTOCOL );
    }

    if ( ( events & TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_NO_PROTOCOL );
    }

    if ( ( events & TQ_EVENT_MUTEX_FIFO_OBTAIN ) != 0 ) {
      TQMutexObtain( ctx, TQ_MUTEX_FIFO );
    }

    if ( ( events & TQ_EVENT_MUTEX_FIFO_RELEASE ) != 0 ) {
      TQMutexRelease( ctx, TQ_MUTEX_FIFO );
    }

    if ( ( events & TQ_EVENT_PIN ) != 0 ) {
      _Thread_Pin( _Thread_Get_executing() );
    }

    if ( ( events & TQ_EVENT_UNPIN ) != 0 ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Unpin( _Thread_Get_executing(), cpu_self );
      _Thread_Dispatch_direct( cpu_self );
    }

    if ( ( events & TQ_EVENT_SCHEDULER_RECORD_STOP ) != 0 ) {
      TQSchedulerRecordStop( ctx );
    }

    if ( ( events & TQ_EVENT_RUNNER_SYNC ) != 0 ) {
      SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC );
    }

    if ( ( events & TQ_EVENT_COUNT ) != 0 ) {
      Count( ctx, worker );
    }

    if ( ( events & TQ_EVENT_BUSY_WAIT ) != 0 ) {
      while ( ctx->busy_wait[ worker ] ) {
        /* Wait */
      }
    }

    if ( ( events & TQ_EVENT_RUNNER_SYNC_2 ) != 0 ) {
      SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC_2 );
    }

    ctx->done[ worker ] = true;
  }
}
static void BlockerA( rtems_task_argument arg )
{
  Worker( arg, TQ_BLOCKER_A );
}

static void BlockerB( rtems_task_argument arg )
{
  Worker( arg, TQ_BLOCKER_B );
}

static void BlockerC( rtems_task_argument arg )
{
  Worker( arg, TQ_BLOCKER_C );
}

static void BlockerD( rtems_task_argument arg )
{
  Worker( arg, TQ_BLOCKER_D );
}

static void BlockerE( rtems_task_argument arg )
{
  Worker( arg, TQ_BLOCKER_E );
}

static void WorkerF( rtems_task_argument arg )
{
  Worker( arg, TQ_WORKER_F );
}

static void HelperA( rtems_task_argument arg )
{
  Worker( arg, TQ_HELPER_A );
}

static void HelperB( rtems_task_argument arg )
{
  Worker( arg, TQ_HELPER_B );
}

static void HelperC( rtems_task_argument arg )
{
  Worker( arg, TQ_HELPER_C );
}
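/*
 * Create the test mutexes and start the worker tasks.  The runner runs at
 * PRIO_VERY_LOW while the workers are created and started so that each
 * worker can run and block waiting for events, and it raises its priority to
 * PRIO_NORMAL once the setup is complete.
 */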
void TQInitialize( TQContext *ctx )
{
  rtems_status_code sc;
  size_t            i;

  ctx->runner_id = rtems_task_self();
  ctx->runner_tcb = GetThread( RTEMS_SELF );

  SetSelfPriority( PRIO_VERY_LOW );

  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
    rtems_attribute attributes;

    attributes = RTEMS_BINARY_SEMAPHORE;

    if ( i == TQ_MUTEX_NO_PROTOCOL ) {
      attributes |= RTEMS_PRIORITY;
    } else if ( i == TQ_MUTEX_FIFO ) {
      attributes |= RTEMS_FIFO;
    } else {
      attributes |= RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY;
    }

    sc = rtems_semaphore_create(
      rtems_build_name( 'M', 'T', 'X', 'A' + i ),
      1,
      attributes,
      0,
      &ctx->mutex_id[ i ]
    );
    T_rsc_success( sc );
  }

  ctx->worker_id[ TQ_BLOCKER_A ] = CreateTask( "BLKA", PRIO_HIGH );
  StartTask( ctx->worker_id[ TQ_BLOCKER_A ], BlockerA, ctx );
  ctx->worker_id[ TQ_BLOCKER_B ] = CreateTask( "BLKB", PRIO_VERY_HIGH );
  StartTask( ctx->worker_id[ TQ_BLOCKER_B ], BlockerB, ctx );
  ctx->worker_id[ TQ_BLOCKER_C ] = CreateTask( "BLKC", PRIO_ULTRA_HIGH );
  StartTask( ctx->worker_id[ TQ_BLOCKER_C ], BlockerC, ctx );
  ctx->worker_id[ TQ_BLOCKER_D ] = CreateTask( "BLKD", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_BLOCKER_D ], BlockerD, ctx );
  ctx->worker_id[ TQ_BLOCKER_E ] = CreateTask( "BLKE", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_BLOCKER_E ], BlockerE, ctx );
  ctx->worker_id[ TQ_WORKER_F ] = CreateTask( "WRKF", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_WORKER_F ], WorkerF, ctx );
  ctx->worker_id[ TQ_HELPER_A ] = CreateTask( "HLPA", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_HELPER_A ], HelperA, ctx );
  ctx->worker_id[ TQ_HELPER_B ] = CreateTask( "HLPB", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_HELPER_B ], HelperB, ctx );
  ctx->worker_id[ TQ_HELPER_C ] = CreateTask( "HLPC", PRIO_LOW );
  StartTask( ctx->worker_id[ TQ_HELPER_C ], HelperC, ctx );

  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_tcb ); ++i ) {
    ctx->worker_tcb[ i ] = GetThread( ctx->worker_id[ i ] );
  }

  SetSelfPriority( PRIO_NORMAL );
}
void TQDestroy( TQContext *ctx )
{
  size_t i;

  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
    DeleteTask( ctx->worker_id[ i ] );
  }

  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
    if ( ctx->mutex_id[ i ] != 0 ) {
      rtems_status_code sc;

      sc = rtems_semaphore_delete( ctx->mutex_id[ i ] );
      T_rsc_success( sc );
    }
  }

  RestoreRunnerPriority();
}

void TQReset( TQContext *ctx )
{
  rtems_id scheduler_id;

  scheduler_id = SCHEDULER_A_ID;
  SetScheduler( ctx->runner_id, scheduler_id, PRIO_NORMAL );
  TQSetScheduler( ctx, TQ_BLOCKER_A, scheduler_id, PRIO_HIGH );
  TQSetScheduler( ctx, TQ_BLOCKER_B, scheduler_id, PRIO_VERY_HIGH );
  TQSetScheduler( ctx, TQ_BLOCKER_C, scheduler_id, PRIO_ULTRA_HIGH );
  TQSetScheduler( ctx, TQ_BLOCKER_D, scheduler_id, PRIO_LOW );
  TQSetScheduler( ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
  TQSetScheduler( ctx, TQ_HELPER_A, scheduler_id, PRIO_LOW );
  TQSetScheduler( ctx, TQ_HELPER_B, scheduler_id, PRIO_LOW );
  TQSetScheduler( ctx, TQ_HELPER_C, scheduler_id, PRIO_LOW );
}
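/*
 * Sort the identifiers of the first three mutexes in ascending order with a
 * simple bubble sort.
 */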
void TQSortMutexesByID( TQContext *ctx )
{
  size_t i;
  size_t n;

  n = 3;

  for ( i = 1; i < n; ++i ) {
    size_t j;

    for ( j = 0; j < n - i; ++j ) {
      if ( ctx->mutex_id[ j ] > ctx->mutex_id[ j + 1 ] ) {
        rtems_id tmp;

        tmp = ctx->mutex_id[ j ];
        ctx->mutex_id[ j ] = ctx->mutex_id[ j + 1 ];
        ctx->mutex_id[ j + 1 ] = tmp;
      }
    }
  }
}

void TQGetProperties( TQContext *ctx, TQWorkerKind enqueued_worker )
{
  ( *ctx->get_properties )( ctx, enqueued_worker );
}

Status_Control TQConvertStatus( TQContext *ctx, Status_Control status )
{
  return ( *ctx->convert_status )( status );
}

void TQEnqueuePrepare( TQContext *ctx )
{
  ( *ctx->enqueue_prepare )( ctx );
}

Status_Control TQEnqueue( TQContext *ctx, TQWait wait )
{
  return ( *ctx->enqueue )( ctx, wait );
}
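/*
 * Carry out the enqueue operation with the deadlock fatal handler installed.
 * If the thread queue deadlock fatal error occurs, the handler jumps back
 * via longjmp() and STATUS_DEADLOCK is returned, otherwise the status of the
 * enqueue operation is returned.
 */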
Status_Control TQEnqueueFatal( TQContext *ctx )
{
  Status_Control status;

  SetFatalHandler( ThreadQueueDeadlock, ctx );
  status = STATUS_MINUS_ONE;

  if ( setjmp( ctx->before_enqueue ) == 0 ) {
    status = TQEnqueue( ctx, ctx->wait );
  } else {
    status = STATUS_DEADLOCK;
  }

  return status;
}

void TQEnqueueDone( TQContext *ctx )
{
  ( *ctx->enqueue_done )( ctx );
}

Status_Control TQSurrender( TQContext *ctx )
{
  return ( *ctx->surrender )( ctx );
}

void TQFlush( TQContext *ctx, bool flush_all )
{
  ctx->flush_count = ( *ctx->flush )( ctx, ctx->how_many, flush_all );
}

rtems_tcb *TQGetOwner( TQContext *ctx )
{
  rtems_tcb *( *get_owner )( TQContext * );

  get_owner = ctx->get_owner;

  if ( get_owner == NULL ) {
    return NULL;
  }

  return ( *get_owner )( ctx );
}
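/*
 * Start recording scheduler operations in the scheduler log of the context.
 * No other scheduler log shall be active.
 */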
void TQSchedulerRecordStart( TQContext *ctx )
{
  T_scheduler_log *log;

  log = T_scheduler_record_40( &ctx->scheduler_log );
  T_null( log );
}

void TQSchedulerRecordStop( TQContext *ctx )
{
  T_scheduler_log *log;

  log = T_scheduler_record( NULL );
  T_eq_ptr( &log->header, &ctx->scheduler_log.header );
}

const T_scheduler_event *TQGetNextAny( TQContext *ctx, size_t *index )
{
  return T_scheduler_next_any(
    &ctx->scheduler_log.header,
    index
  );
}

const T_scheduler_event *TQGetNextBlock( TQContext *ctx, size_t *index )
{
  return T_scheduler_next(
    &ctx->scheduler_log.header,
    T_SCHEDULER_BLOCK,
    index
  );
}

const T_scheduler_event *TQGetNextUnblock( TQContext *ctx, size_t *index )
{
  return T_scheduler_next(
    &ctx->scheduler_log.header,
    T_SCHEDULER_UNBLOCK,
    index
  );
}

const T_scheduler_event *TQGetNextUpdatePriority(
  TQContext *ctx,
  size_t    *index
)
{
  return T_scheduler_next(
    &ctx->scheduler_log.header,
    T_SCHEDULER_UPDATE_PRIORITY,
    index
  );
}

const T_scheduler_event *TQGetNextAskForHelp(
  TQContext *ctx,
  size_t    *index
)
{
  return T_scheduler_next(
    &ctx->scheduler_log.header,
    T_SCHEDULER_ASK_FOR_HELP,
    index
  );
}

void TQDoNothing( TQContext *ctx )
{
  (void) ctx;
}

Status_Control TQDoNothingSuccessfully( TQContext *ctx )
{
  (void) ctx;

  return STATUS_SUCCESSFUL;
}

Status_Control TQConvertStatusClassic( Status_Control status )
{
  return STATUS_BUILD( STATUS_GET_CLASSIC( status ), 0 );
}

Status_Control TQConvertStatusPOSIX( Status_Control status )
{
  return STATUS_BUILD( 0, STATUS_GET_POSIX( status ) );
}

void TQEnqueuePrepareDefault( TQContext *ctx )
{
  Status_Control status;

  status = TQEnqueue( ctx, TQ_NO_WAIT );
  T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
}

void TQEnqueueDoneDefault( TQContext *ctx )
{
  Status_Control status;

  status = TQSurrender( ctx );
  T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
}
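/*
 * Enqueue on a Classic API semaphore.  The TQWait variant is mapped to the
 * corresponding option set and timeout of rtems_semaphore_obtain().
 */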
Status_Control TQEnqueueClassicSem( TQContext *ctx, TQWait wait )
{
  rtems_status_code sc;
  rtems_option      option;
  rtems_interval    timeout;

  switch ( wait ) {
    case TQ_WAIT_FOREVER:
      option = RTEMS_WAIT;
      timeout = RTEMS_NO_TIMEOUT;
      break;
    case TQ_WAIT_TIMED:
      option = RTEMS_WAIT;
      timeout = UINT32_MAX;
      break;
    default:
      option = RTEMS_NO_WAIT;
      timeout = 0;
      break;
  }

  sc = rtems_semaphore_obtain( ctx->thread_queue_id, option, timeout );

  return STATUS_BUILD( sc, 0 );
}

Status_Control TQSurrenderClassicSem( TQContext *ctx )
{
  rtems_status_code sc;

  sc = rtems_semaphore_release( ctx->thread_queue_id );

  return STATUS_BUILD( sc, 0 );
}
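/*
 * Get the owner of the thread queue of a Classic API semaphore.  The
 * semaphore is looked up with _Semaphore_Get() and interrupts are re-enabled
 * before the owner is returned.
 */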
rtems_tcb *TQGetOwnerClassicSem( TQContext *ctx )
{
  Semaphore_Control   *semaphore;
  Thread_queue_Context queue_context;
  rtems_tcb           *thread;

  semaphore = _Semaphore_Get( ctx->thread_queue_id, &queue_context );
  T_assert_not_null( semaphore );
  thread = semaphore->Core_control.Wait_queue.Queue.owner;
  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );

  return thread;
}

uint32_t TQSemGetCount( TQSemContext *ctx )
{
  return ( *ctx->get_count )( ctx );
}

void TQSemSetCount( TQSemContext *ctx, uint32_t count )
{
  ( *ctx->set_count )( ctx, count );
}

uint32_t TQSemGetCountClassic( TQSemContext *ctx )
{
  Semaphore_Control   *semaphore;
  Thread_queue_Context queue_context;
  uint32_t             count;

  semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
  T_assert_not_null( semaphore );
  count = semaphore->Core_control.Semaphore.count;
  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );

  return count;
}

void TQSemSetCountClassic( TQSemContext *ctx, uint32_t count )
{
  Semaphore_Control   *semaphore;
  Thread_queue_Context queue_context;

  semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
  T_assert_not_null( semaphore );
  semaphore->Core_control.Semaphore.count = count;
  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
}