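/*
 * Priority affinity SMP scheduler implementation.
 */
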
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/priority.h>

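/*
 * Ordering predicate for ordered chain insertion.  A position is accepted
 * only if a successor node exists and the base SMP priority comparison
 * holds for it.
 */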
static bool _Scheduler_priority_affinity_SMP_Priority_less_equal(
  const void *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Priority_less_equal( key, to_insert, next );
}

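/*
 * Returns the scheduler node as the affinity specific node type.
 */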
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

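/*
 * Initializes the scheduler node.  The affinity of a newly initialized node
 * is the set of all online processors.
 */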
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node *node,
  Thread_Control *the_thread,
  Priority_Control priority
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  _Scheduler_priority_SMP_Node_initialize( scheduler, node, the_thread, priority );

  /*
   * The only addition to the base priority SMP node is the affinity mask,
   * which defaults to the set of online processors.
   */
  the_node = _Scheduler_priority_affinity_SMP_Node_downcast( node );
  _Processor_mask_Assign( &the_node->Affinity, _SMP_Get_online_processors() );
}

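/*
 * Returns the highest priority ready node which may execute on the processor
 * of the victim node's owner thread.  A NULL victim yields the overall
 * highest priority ready node without an affinity check.
 */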
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node *victim
)
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control index;
  Scheduler_Node *highest = NULL;
  Thread_Control *victim_thread;
  uint32_t victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim indicates that no affinity constraint applies, so the
   * overall highest priority ready node is returned.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /*
   * Scan the ready queues from the highest priority with a ready node down
   * to the maximum (lowest) priority.  Within each priority level, take the
   * first node whose affinity set contains the victim's processor.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control *chain = &self->Ready[index];
    Chain_Node *chain_node;

    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      if ( _Processor_mask_Is_set( &node->Affinity, victim_cpu_index ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }

    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

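/*
 * Blocks the thread.  The replacement is chosen with the affinity aware
 * highest ready lookup and exact processor allocation.
 */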
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle
  );
}

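/*
 * Walks the scheduled chain from its tail (lowest priority end) and returns
 * the first node whose processor is in the affinity set of the filter node,
 * or NULL if no scheduled node executes on a matching processor.
 */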
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *filter_base
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;
  Chain_Node *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control *thread;
    uint32_t cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( _Processor_mask_Is_set( &filter->Affinity, cpu_index ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }
  }

  return lowest_scheduled;
}

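/*
 * Enqueue operation used on the unblock path.  It differs from the base SMP
 * enqueue by using the affinity aware lowest scheduled lookup and exact
 * processor allocation.
 */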
static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_priority_affinity_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

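/*
 * Checks for affinity driven migrations.  While there is a ready node with a
 * higher priority than the lowest priority scheduled node it may execute on,
 * that ready node is moved to the scheduled set.
 */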
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_priority_SMP_Context *self;
  Scheduler_Node *lowest_scheduled;
  Scheduler_Node *highest_ready;

  self = _Scheduler_priority_SMP_Get_self( context );

  while (1) {
    Priority_Control lowest_scheduled_priority;
    Priority_Control insert_priority;

    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
      /* Nothing is ready, so there is nothing to migrate. */
      break;
    }

    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready
      );

    /*
     * If no scheduled node executes on a processor in the affinity set of
     * the highest priority ready node, then no migration is possible.
     */
    if ( lowest_scheduled == NULL )
      break;

    lowest_scheduled_priority =
      _Scheduler_SMP_Node_priority( lowest_scheduled );

    if (
      _Scheduler_SMP_Priority_less_equal(
        &lowest_scheduled_priority,
        &lowest_scheduled->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      break;
    }

    /*
     * The highest priority ready node preempts the lowest priority scheduled
     * node it may execute on.
     */
    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
    insert_priority = _Scheduler_SMP_Node_priority( highest_ready );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      highest_ready,
      insert_priority,
      lowest_scheduled,
      _Scheduler_SMP_Insert_scheduled,
      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
      _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
      _Scheduler_SMP_Allocate_processor_exact,
      _Scheduler_priority_SMP_Get_idle,
      _Scheduler_priority_SMP_Release_idle
    );
  }
}

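/*
 * Unblocks the thread and then checks whether migrations are needed to
 * honor processor affinities.
 */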
void _Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Release_idle
  );

  /*
   * Perform any migrations required to honor processor affinities after the
   * set of ready threads changed.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

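/*
 * General enqueue operation.  It uses the same affinity aware operations as
 * the FIFO variant above.
 */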
static bool _Scheduler_priority_affinity_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_priority_affinity_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

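/*
 * Enqueues a node which is already in the scheduled set, using the affinity
 * aware highest ready lookup to select a possible replacement.
 */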
static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control insert_priority
)
{
  _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

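/*
 * Handles help requests.  Note that this path uses the plain SMP lowest
 * scheduled lookup and lazy processor allocation rather than the affinity
 * aware variants.
 */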
static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Release_idle
  );
}

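/*
 * Updates the priority of the thread and afterwards checks for affinity
 * driven migrations.
 */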
void _Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    _Scheduler_priority_affinity_SMP_Do_ask_for_help
  );

  /*
   * A priority change may make migrations necessary to honor affinities.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

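/*
 * Asks for help; delegates to the common helper above.
 */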
bool _Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_priority_affinity_SMP_Do_ask_for_help(
    context,
    the_thread,
    node
  );
}

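/*
 * Reconsiders a pending help request for the thread.
 */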
void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}

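/*
 * Withdraws the node from the scheduler.  A replacement is selected with the
 * affinity aware highest ready lookup.
 */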
void _Scheduler_priority_affinity_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node,
  Thread_Scheduler_state next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle
  );
}

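/*
 * Makes the scheduler node sticky, using the affinity aware enqueue
 * operation.
 */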
void _Scheduler_priority_affinity_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  _Scheduler_SMP_Make_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue
  );
}

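/*
 * Cleans up a sticky scheduler node, using the affinity aware highest ready
 * lookup and exact processor allocation.
 */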
void _Scheduler_priority_affinity_SMP_Clean_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  _Scheduler_SMP_Clean_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

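/*
 * Adds a processor to the scheduler instance with the supplied idle thread.
 */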
void _Scheduler_priority_affinity_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_priority_SMP_Has_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}

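/*
 * Removes a processor from the scheduler instance and returns its idle
 * thread.
 */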
Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

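/*
 * Yields the processor, re-enqueueing the node with the affinity aware
 * enqueue operations.
 */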
void _Scheduler_priority_affinity_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled
  );
}

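/*
 * Sets the processor affinity of the node.  The requested set must contain
 * at least one processor owned by this scheduler instance.  A ready thread
 * is blocked and unblocked around the change so that it is placed according
 * to the new affinity.
 */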
Status_Control _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node_base,
  const Processor_mask *affinity
)
{
  Scheduler_Context *context;
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control current_state;
  Processor_mask my_affinity;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &my_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Count( &my_affinity ) == 0 ) {
    return STATUS_INVALID_NUMBER;
  }

  node = _Scheduler_priority_affinity_SMP_Node_downcast( node_base );

  /*
   * The requested affinity equals the current affinity, so there is nothing
   * to do.
   */
  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
    return STATUS_SUCCESSFUL;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block(
      scheduler,
      thread,
      &node->Base.Base.Base
    );
  }

  _Processor_mask_Assign( &node->Affinity, affinity );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * Unblocking the thread re-enqueues it and triggers the migration check,
     * so it is placed according to the new affinity.
     */
    _Scheduler_priority_affinity_SMP_Unblock(
      scheduler,
      thread,
      &node->Base.Base.Base
    );
  }

  return STATUS_SUCCESSFUL;
}