#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "tx-support.h"
#include "ts-config.h"

#include <rtems/test.h>
#include <rtems/score/percpu.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/rtems/semimpl.h>

#include <string.h>

rtems_id DoCreateTask( rtems_name name, rtems_task_priority priority )
{
  rtems_status_code sc;
  rtems_id id;

  sc = rtems_task_create(
    name,
    priority,
    TEST_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &id
  );
  T_assert_rsc_success( sc );

  return id;
}

void StartTask( rtems_id id, rtems_task_entry entry, void *arg )
{
  rtems_status_code sc;

  sc = rtems_task_start( id, entry, (rtems_task_argument) arg );
  T_assert_rsc_success( sc );
}

void DeleteTask( rtems_id id )
{
  if ( id != 0 ) {
    rtems_status_code sc;

    sc = rtems_task_delete( id );
    T_quiet_rsc_success( sc );
  }
}

void SuspendTask( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_task_suspend( id );
  T_quiet_rsc_success( sc );
}

void SuspendSelf( void )
{
  SuspendTask( RTEMS_SELF );
}

void ResumeTask( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_task_resume( id );
  T_quiet_rsc_success( sc );
}

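/*
 * Returns true if and only if the task is suspended.  The query uses the
 * status of rtems_task_is_suspended().
 */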
bool IsTaskSuspended( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_task_is_suspended( id );
  T_quiet_true( sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED );

  return sc == RTEMS_ALREADY_SUSPENDED;
}

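/*
 * Returns the set of pending events of the executing task without consuming
 * them.
 */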
rtems_event_set QueryPendingEvents( void )
{
  rtems_status_code sc;
  rtems_event_set events;

  events = 0;
  sc = rtems_event_receive(
    RTEMS_PENDING_EVENTS,
    RTEMS_EVENT_ALL | RTEMS_NO_WAIT,
    0,
    &events
  );
  T_quiet_rsc_success( sc );

  return events;
}

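/*
 * Consumes and returns all currently pending events of the executing task
 * without blocking.
 */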
rtems_event_set PollAnyEvents( void )
{
  rtems_event_set events;

  events = 0;
  (void) rtems_event_receive(
    RTEMS_ALL_EVENTS,
    RTEMS_EVENT_ANY | RTEMS_NO_WAIT,
    0,
    &events
  );

  return events;
}

rtems_event_set ReceiveAnyEvents( void )
{
  return ReceiveAnyEventsTimed( RTEMS_NO_TIMEOUT );
}

rtems_event_set ReceiveAnyEventsTimed( rtems_interval ticks )
{
  rtems_event_set events;

  events = 0;
  (void) rtems_event_receive(
    RTEMS_ALL_EVENTS,
    RTEMS_EVENT_ANY | RTEMS_WAIT,
    ticks,
    &events
  );

  return events;
}

void ReceiveAllEvents( rtems_event_set events )
{
  rtems_status_code sc;
  rtems_event_set received;

  received = 0;
  sc = rtems_event_receive(
    events,
    RTEMS_EVENT_ALL | RTEMS_WAIT,
    RTEMS_NO_TIMEOUT,
    &received
  );
  T_quiet_rsc_success( sc );
  T_quiet_eq_u32( received, events );
}

void SendEvents( rtems_id id, rtems_event_set events )
{
  rtems_status_code sc;

  sc = rtems_event_send( id, events );
  T_quiet_rsc_success( sc );
}

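/*
 * Returns the current task mode without modifying it.
 */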
rtems_mode GetMode( void )
{
  return SetMode( RTEMS_DEFAULT_MODES, RTEMS_CURRENT_MODE );
}

rtems_mode SetMode( rtems_mode set, rtems_mode mask )
{
  rtems_status_code sc;
  rtems_mode previous;

  sc = rtems_task_mode( set, mask, &previous );
  T_quiet_rsc_success( sc );

  return previous;
}

rtems_task_priority GetPriority( rtems_id id )
{
  return SetPriority( id, RTEMS_CURRENT_PRIORITY );
}

rtems_task_priority GetPriorityByScheduler(
  rtems_id task_id,
  rtems_id scheduler_id
)
{
  rtems_status_code sc;
  rtems_task_priority priority;

  priority = PRIO_INVALID;
  sc = rtems_task_get_priority( task_id, scheduler_id, &priority );

  if ( sc != RTEMS_SUCCESSFUL ) {
    return PRIO_INVALID;
  }

  return priority;
}

rtems_task_priority SetPriority( rtems_id id, rtems_task_priority priority )
{
  rtems_status_code sc;
  rtems_task_priority previous;

  previous = PRIO_INVALID;
  sc = rtems_task_set_priority( id, priority, &previous );
  T_quiet_rsc_success( sc );

  return previous;
}

rtems_task_priority GetSelfPriority( void )
{
  return SetPriority( RTEMS_SELF, RTEMS_CURRENT_PRIORITY );
}

rtems_task_priority SetSelfPriority( rtems_task_priority priority )
{
  return SetPriority( RTEMS_SELF, priority );
}

rtems_task_priority SetSelfPriorityNoYield( rtems_task_priority priority )
{
  rtems_status_code sc;
  rtems_id id;

  /*
   * Use a temporary priority ceiling mutex so that a priority change, in
   * particular lowering the priority, does not carry out an implicit yield.
   */

  sc = rtems_semaphore_create(
    rtems_build_name( 'T', 'E', 'M', 'P' ),
    0,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &id
  );
  T_quiet_rsc_success( sc );

  priority = SetSelfPriority( priority );
  ReleaseMutex( id );
  DeleteMutex( id );

  return priority;
}

rtems_id GetScheduler( rtems_id id )
{
  rtems_status_code sc;
  rtems_id scheduler_id;

  scheduler_id = 0xffffffff;
  sc = rtems_task_get_scheduler( id, &scheduler_id );
  T_quiet_rsc_success( sc );

  return scheduler_id;
}

rtems_id GetSelfScheduler( void )
{
  return GetScheduler( RTEMS_SELF );
}

void SetScheduler(
  rtems_id task_id,
  rtems_id scheduler_id,
  rtems_task_priority priority
)
{
  rtems_status_code sc;

  sc = rtems_task_set_scheduler( task_id, scheduler_id, priority );
  T_quiet_rsc_success( sc );
}

void SetSelfScheduler( rtems_id scheduler_id, rtems_task_priority priority )
{
  SetScheduler( RTEMS_SELF, scheduler_id, priority );
}

void GetAffinity( rtems_id id, cpu_set_t *set )
{
  rtems_status_code sc;

  CPU_ZERO( set );
  sc = rtems_task_get_affinity( id, sizeof( *set ), set );
  T_quiet_rsc_success( sc );
}

void GetSelfAffinity( cpu_set_t *set )
{
  GetAffinity( RTEMS_SELF, set );
}

void SetAffinity( rtems_id id, const cpu_set_t *set )
{
  rtems_status_code sc;

  sc = rtems_task_set_affinity( id, sizeof( *set ), set );
  T_quiet_rsc_success( sc );
}

void SetSelfAffinity( const cpu_set_t *set )
{
  SetAffinity( RTEMS_SELF, set );
}

void SetAffinityOne( rtems_id id, uint32_t cpu_index )
{
  cpu_set_t set;

  CPU_ZERO( &set );
  CPU_SET( (int) cpu_index, &set );
  SetAffinity( id, &set );
}

void SetSelfAffinityOne( uint32_t cpu_index )
{
  SetAffinityOne( RTEMS_SELF, cpu_index );
}

void SetAffinityAll( rtems_id id )
{
  cpu_set_t set;

  CPU_FILL( &set );
  SetAffinity( id, &set );
}

void SetSelfAffinityAll( void )
{
  SetAffinityAll( RTEMS_SELF );
}

void Yield( void )
{
  rtems_status_code sc;

  sc = rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
  T_quiet_rsc_success( sc );
}

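/*
 * Yields on behalf of the specified task.  This uses score services, since
 * the Classic API can only yield the executing task.
 */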
void YieldTask( rtems_id id )
{
  Thread_Control *the_thread;
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    return;
  }

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
  _Thread_Yield( the_thread );
  _Thread_Dispatch_direct( cpu_self );
}

void AddProcessor( rtems_id scheduler_id, uint32_t cpu_index )
{
  rtems_status_code sc;

  sc = rtems_scheduler_add_processor( scheduler_id, cpu_index );
  T_quiet_rsc_success( sc );
}

void RemoveProcessor( rtems_id scheduler_id, uint32_t cpu_index )
{
  rtems_status_code sc;

  sc = rtems_scheduler_remove_processor( scheduler_id, cpu_index );
  T_quiet_rsc_success( sc );
}

rtems_id CreateMutex( void )
{
  rtems_status_code sc;
  rtems_id id;

  id = INVALID_ID;
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'U', 'T', 'X' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,
    &id
  );
  T_rsc_success( sc );

  return id;
}

rtems_id CreateMutexNoProtocol( void )
{
  rtems_status_code sc;
  rtems_id id;

  id = INVALID_ID;
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'U', 'T', 'X' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY,
    0,
    &id
  );
  T_rsc_success( sc );

  return id;
}

rtems_id CreateMutexFIFO( void )
{
  rtems_status_code sc;
  rtems_id id;

  id = INVALID_ID;
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'U', 'T', 'X' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_FIFO,
    0,
    &id
  );
  T_rsc_success( sc );

  return id;
}

void DeleteMutex( rtems_id id )
{
  if ( id != INVALID_ID ) {
    rtems_status_code sc;

    sc = rtems_semaphore_delete( id );
    T_rsc_success( sc );
  }
}

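/*
 * Returns true if and only if the executing thread is the owner of the
 * mutex.  The owner is read directly from the thread queue of the semaphore.
 */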
bool IsMutexOwner( rtems_id id )
{
  Semaphore_Control *the_semaphore;
  Thread_queue_Context queue_context;

  the_semaphore = _Semaphore_Get( id, &queue_context );
  if ( the_semaphore == NULL ) {
    return false;
  }

  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
  return the_semaphore->Core_control.Wait_queue.Queue.owner ==
    _Thread_Get_executing();
}

void ObtainMutex( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  T_rsc_success( sc );
}

void ObtainMutexTimed( rtems_id id, rtems_interval ticks )
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, ticks );
  T_rsc_success( sc );
}

void ObtainMutexDeadlock( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  T_rsc( sc, RTEMS_INCORRECT_STATE );
}

void ReleaseMutex( rtems_id id )
{
  rtems_status_code sc;

  sc = rtems_semaphore_release( id );
  T_rsc_success( sc );
}

Thread_queue_Queue *GetMutexThreadQueue( rtems_id id )
{
  Semaphore_Control *the_semaphore;
  Thread_queue_Context queue_context;

  the_semaphore = _Semaphore_Get( id, &queue_context );
  if ( the_semaphore == NULL ) {
    return NULL;
  }

  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
  return &the_semaphore->Core_control.Wait_queue.Queue;
}

void RestoreRunnerASR( void )
{
  rtems_status_code sc;

  sc = rtems_signal_catch( NULL, RTEMS_DEFAULT_MODES );
  T_quiet_rsc_success( sc );
}

void RestoreRunnerMode( void )
{
  rtems_status_code sc;
  rtems_mode mode;

  sc = rtems_task_mode( RTEMS_DEFAULT_MODES, RTEMS_ALL_MODE_MASKS, &mode );
  T_quiet_rsc_success( sc );
}

void RestoreRunnerPriority( void )
{
  SetSelfPriority( 1 );
}

void RestoreRunnerScheduler( void )
{
  SetSelfScheduler( SCHEDULER_A_ID, 1 );
}

Thread_Control *GetThread( rtems_id id )
{
  Thread_Control *the_thread;
  ISR_lock_Context lock_context;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    return NULL;
  }

  _ISR_lock_ISR_enable( &lock_context );
  return the_thread;
}

Thread_Control *GetExecuting( void )
{
  return _Thread_Get_executing();
}

void KillZombies( void )
{
  _RTEMS_Lock_allocator();
  _Thread_Kill_zombies();
  _RTEMS_Unlock_allocator();
}

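/*
 * Busy waits until the specified task is no longer executing on a processor.
 * This is a no-op on uniprocessor configurations.
 */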
void WaitForExecutionStop( rtems_id task_id )
{
#if defined( RTEMS_SMP )
  Thread_Control *the_thread;

  the_thread = GetThread( task_id );
  T_assert_not_null( the_thread );

  while ( _Thread_Is_executing_on_a_processor( the_thread ) ) {

  }
#else
  (void) task_id;
#endif
}

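/*
 * Busy waits until the specified task is waiting for an object in the
 * intend-to-block wait state.  This is a no-op on uniprocessor
 * configurations.
 */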
void WaitForIntendToBlock( rtems_id task_id )
{
#if defined( RTEMS_SMP )
  Thread_Control *the_thread;
  Thread_Wait_flags intend_to_block;

  the_thread = GetThread( task_id );
  T_assert_not_null( the_thread );

  intend_to_block = THREAD_WAIT_CLASS_OBJECT |
    THREAD_WAIT_STATE_INTEND_TO_BLOCK;

  while ( _Thread_Wait_flags_get_acquire( the_thread ) != intend_to_block ) {

  }
#else
  (void) task_id;
#endif
}

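/*
 * Busy waits until the specified task is the heir thread of the specified
 * processor.
 */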
void WaitForHeir( uint32_t cpu_index, rtems_id task_id )
{
  Per_CPU_Control *cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );

  while ( cpu->heir->Object.id != task_id ) {
    RTEMS_COMPILER_MEMORY_BARRIER();
  }
}

void WaitForNextTask( uint32_t cpu_index, rtems_id task_id )
{
  Per_CPU_Control *cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );

  while ( cpu->heir->Object.id == task_id ) {
    RTEMS_COMPILER_MEMORY_BARRIER();
  }

  while ( cpu->thread_dispatch_disable_level != 0 ) {
    RTEMS_COMPILER_MEMORY_BARRIER();
  }
}

void GetTaskTimerInfo( rtems_id id, TaskTimerInfo *info )
{
  GetTaskTimerInfoByThread( GetThread( id ), info );
}

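/*
 * Determines the task timer state from the per-CPU watchdog header on which
 * the thread timer watchdog is currently scheduled, if any.
 */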
void GetTaskTimerInfoByThread(
  struct _Thread_Control *thread,
  TaskTimerInfo *info
)
{
  info->expire_ticks = 0;
  info->expire_timespec.tv_sec = -1;
  info->expire_timespec.tv_nsec = -1;

  if ( thread != NULL ) {
    ISR_lock_Context lock_context;
    ISR_lock_Context lock_context_2;
    Per_CPU_Control *cpu;

    _ISR_lock_ISR_disable_and_acquire( &thread->Timer.Lock, &lock_context );
    info->expire_ticks = thread->Timer.Watchdog.expire;
#if defined( RTEMS_SMP )
    cpu = thread->Timer.Watchdog.cpu;
#else
    cpu = _Per_CPU_Get();
#endif
    _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context_2 );

    if ( _Watchdog_Is_scheduled( &thread->Timer.Watchdog ) ) {
      const Watchdog_Header *hdr;

      hdr = thread->Timer.header;

      if ( hdr == &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ] ) {
        info->state = TASK_TIMER_TICKS;
      } else {
        _Watchdog_Ticks_to_timespec(
          info->expire_ticks,
          &info->expire_timespec
        );

        if ( hdr == &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ] ) {
          info->state = TASK_TIMER_REALTIME;
        } else {
          T_quiet_eq_ptr(
            hdr,
            &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ]
          );
          info->state = TASK_TIMER_MONOTONIC;
        }
      }
    } else {
      info->state = TASK_TIMER_INACTIVE;
    }

    _Watchdog_Per_CPU_release_critical( cpu, &lock_context_2 );
    _ISR_lock_Release_and_ISR_enable( &thread->Timer.Lock, &lock_context );
  } else {
    info->state = TASK_TIMER_INVALID;
  }
}

#if defined( RTEMS_SMP )
static void DoWatchdogTick( void *arg )
{
  (void) arg;
  _Watchdog_Tick( _Per_CPU_Get() );
}
#endif

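/*
 * Performs one watchdog tick.  On SMP configurations, the tick is carried
 * out on all processors.
 */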
void ClockTick( void )
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
#if defined( RTEMS_SMP )
  DoWatchdogTick( NULL );
  _SMP_Othercast_action( DoWatchdogTick, NULL );
#else
  _Watchdog_Tick( cpu_self );
#endif
  _Thread_Dispatch_enable( cpu_self );
}

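/*
 * Fires all watchdogs which are still scheduled on the processor by tickling
 * each per-CPU watchdog header with the maximum point in time.
 */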
static void FinalWatchdogTick( Per_CPU_Control *cpu )
{
  ISR_lock_Context lock_context;
  Watchdog_Header *header;
  Watchdog_Control *first;

  _ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context );

  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  first = _Watchdog_Header_first( header );

  if ( first != NULL ) {
    _Watchdog_Tickle(
      header,
      first,
      UINT64_MAX,
      &cpu->Watchdog.Lock,
      &lock_context
    );
  }

  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
  first = _Watchdog_Header_first( header );

  if ( first != NULL ) {
    _Watchdog_Tickle(
      header,
      first,
      UINT64_MAX,
      &cpu->Watchdog.Lock,
      &lock_context
    );
  }

  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
  first = _Watchdog_Header_first( header );

  if ( first != NULL ) {
    _Watchdog_Tickle(
      header,
      first,
      UINT64_MAX,
      &cpu->Watchdog.Lock,
      &lock_context
    );
  }

  _ISR_lock_Release_and_ISR_enable( &cpu->Watchdog.Lock, &lock_context );
}

#if defined( RTEMS_SMP )
static void DoFinalWatchdogTick( void *arg )
{
  (void) arg;
  FinalWatchdogTick( _Per_CPU_Get() );
}
#endif

void FinalClockTick( void )
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
#if defined( RTEMS_SMP )
  DoFinalWatchdogTick( NULL );
  _SMP_Othercast_action( DoFinalWatchdogTick, NULL );
#else
  FinalWatchdogTick( cpu_self );
#endif
  _Thread_Dispatch_enable( cpu_self );
}

static FatalHandler fatal_handler;

static void *fatal_arg;

void FatalInitialExtension(
  rtems_fatal_source source,
  bool always_set_to_false,
  rtems_fatal_code code
)
{
  FatalHandler fatal;

  T_quiet_false( always_set_to_false );
  fatal = fatal_handler;

  if ( fatal != NULL ) {
    ( *fatal )( source, code, fatal_arg );
  }
}

void SetFatalHandler( FatalHandler fatal, void *arg )
{
  fatal_handler = fatal;
  fatal_arg = arg;
}

static rtems_id task_switch_id;

static rtems_task_switch_extension task_switch_extension;

static void TaskSwitchExtension( rtems_tcb *executing, rtems_tcb *heir )
{
  ( *task_switch_extension )( executing, heir );
}

void SetTaskSwitchExtension( rtems_task_switch_extension task_switch )
{
  rtems_task_switch_extension last;
  rtems_status_code sc;

  last = task_switch_extension;

  if ( task_switch == NULL ) {
    if ( last != NULL ) {
      sc = rtems_extension_delete( task_switch_id );
      T_quiet_rsc_success( sc );

      task_switch_extension = NULL;
    }
  } else {
    task_switch_extension = task_switch;

    if ( last == NULL ) {
      rtems_extensions_table table = {
        .thread_switch = TaskSwitchExtension
      };

      sc = rtems_extension_create(
        rtems_build_name( 'T', 'S', 'W', 'I' ),
        &table,
        &task_switch_id
      );
      T_quiet_rsc_success( sc );
    }
  }
}

void ClearExtensionCalls( ExtensionCalls *calls )
{
  memset( calls, 0, sizeof( *calls ) );
}

void CopyExtensionCalls( const ExtensionCalls *from, ExtensionCalls *to )
{
  memcpy( to, from, sizeof( *to ) );
}

#if defined(RTEMS_SMP)
static volatile bool delay_thread_dispatch;

static void DelayThreadDispatchHandler( void *arg )
{
  (void) arg;

  while ( delay_thread_dispatch ) {

  }
}

static const Per_CPU_Job_context delay_thread_dispatch_context = {
  .handler = DelayThreadDispatchHandler
};

static Per_CPU_Job delay_thread_dispatch_job = {
  .context = &delay_thread_dispatch_context
};
#endif

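/*
 * Delays thread dispatching on the specified processor through a per-CPU job
 * which busy waits until StopDelayThreadDispatch() clears the delay flag.
 */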
void StartDelayThreadDispatch( uint32_t cpu_index )
{
#if defined(RTEMS_SMP)
  if ( rtems_configuration_get_maximum_processors() > cpu_index ) {
    delay_thread_dispatch = true;
    _Per_CPU_Submit_job(
      _Per_CPU_Get_by_index( cpu_index ),
      &delay_thread_dispatch_job
    );
  }
#endif
}

void StopDelayThreadDispatch( uint32_t cpu_index )
{
#if defined(RTEMS_SMP)
  if ( rtems_configuration_get_maximum_processors() > cpu_index ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    delay_thread_dispatch = false;
    _Per_CPU_Wait_for_job(
      _Per_CPU_Get_by_index( cpu_index ),
      &delay_thread_dispatch_job
    );
    _Thread_Dispatch_enable( cpu_self );
  }
#endif
}

bool AreInterruptsEnabled( void )
{
  return _ISR_Get_level() == 0;
}

static bool IsWhiteSpace( char c )
{
  return c == ' ' || c == '\t';
}

bool IsWhiteSpaceOnly( const char *s )
{
  char c;

  while ( ( c = *s ) != '\0' ) {
    if ( !IsWhiteSpace( c ) ) {
      return false;
    }

    ++s;
  }

  return true;
}

static const char *EatWhiteSpace( const char *s )
{
  char c;

  while ( ( c = *s ) != '\0' ) {
    if ( !IsWhiteSpace( c ) ) {
      break;
    }

    ++s;
  }

  return s;
}

bool IsEqualIgnoreWhiteSpace( const char *a, const char *b )
{
  while ( true ) {
    a = EatWhiteSpace( a );
    b = EatWhiteSpace( b );

    if ( *a != *b ) {
      return false;
    }

    if ( *a == '\0' ) {
      return true;
    }

    ++a;
    ++b;
  }

  return true;
}

#if defined(RTEMS_SMP)
bool TicketLockIsAvailable( const SMP_ticket_lock_Control *lock )
{
  unsigned int now_serving;
  unsigned int next_ticket;

  now_serving = _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
  next_ticket = _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );

  return now_serving == next_ticket;
}

void TicketLockWaitForOwned( const SMP_ticket_lock_Control *lock )
{
  while ( TicketLockIsAvailable( lock ) ) {

  }
}

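/*
 * Busy waits until, in addition to the current owner, exactly the specified
 * count of other threads have drawn a ticket of the lock.
 */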
void TicketLockWaitForOthers(
  const SMP_ticket_lock_Control *lock,
  unsigned int others
)
{
  unsigned int expected;
  unsigned int actual;

  expected = _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
  expected += others + 1;

  do {
    actual = _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );
  } while ( expected != actual );
}

void TicketLockGetState(
  const SMP_ticket_lock_Control *lock,
  TicketLockState *state
)
{
  state->lock = lock;
  state->next_ticket =
    _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );
}

void TicketLockWaitForAcquires(
  const TicketLockState *state,
  unsigned int acquire_count
)
{
  const SMP_ticket_lock_Control *lock;
  unsigned int expected;
  unsigned int actual;

  lock = state->lock;
  expected = state->next_ticket + acquire_count;

  do {
    actual = _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );
  } while ( expected != actual );
}

void TicketLockWaitForReleases(
  const TicketLockState *state,
  unsigned int release_count
)
{
  const SMP_ticket_lock_Control *lock;
  unsigned int expected;
  unsigned int actual;

  lock = state->lock;
  expected = state->next_ticket + release_count;

  do {
    actual = _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
  } while ( expected != actual );
}

#endif