/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreSchedulerSMP
 *
 * @brief This header file provides interfaces of the
 *   @ref RTEMSScoreSchedulerSMP which are only used by the implementation.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup RTEMSScoreSchedulerSMP
 *
 * The SMP scheduler framework maintains for each scheduler instance a chain
 * of scheduled nodes (the nodes which currently own a processor) and
 * delegates the organization of the set of ready nodes to the concrete
 * scheduler implementation.  A scheduler node is in one of three states:
 *
 * - SCHEDULER_SMP_NODE_BLOCKED: the node does not participate in scheduling,
 * - SCHEDULER_SMP_NODE_SCHEDULED: the node owns a processor, and
 * - SCHEDULER_SMP_NODE_READY: the node competes for a processor.
 *
 * State transitions are triggered by the block, unblock, yield, and priority
 * change operations.  The operations below are generic building blocks which
 * a concrete scheduler customizes through the callback types that follow.
 *
 * @{
 */

typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

/**
 * @brief Does nothing.
 *
 * This handler can be used by schedulers which do not need to register idle
 * threads.
 *
 * @param context This parameter is unused.
 * @param idle This parameter is unused.
 * @param cpu This parameter is unused.
 */
static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

/**
 * @brief Checks if the priority to insert is less than or equal to the
 *   priority of the next scheduler node in the chain.
 *
 * This chain order is used to keep the chain of scheduled nodes sorted by
 * priority.
 *
 * @param key points to the priority to insert.
 * @param to_insert This parameter is unused.
 * @param next is the chain node of the next scheduler node.
 *
 * @retval true The priority to insert is less than or equal to the priority
 *   of the next scheduler node.
 * @retval false Otherwise.
 */
static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  (void) to_insert;
  priority_to_insert = (const Priority_Control *) key;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}
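
/*
 * For illustration, this order function together with
 * _Chain_Insert_ordered_unprotected() keeps a chain sorted by priority; see
 * _Scheduler_SMP_Insert_scheduled() below for the actual use in this header.
 * A sketch, assuming a context and node set up as elsewhere in this file:
 *
 *   Priority_Control key;
 *
 *   key = _Scheduler_SMP_Node_priority( node );
 *   _Chain_Insert_ordered_unprotected(
 *     &self->Scheduled,
 *     &node->Node.Chain,
 *     &key,
 *     _Scheduler_SMP_Priority_less_equal
 *   );
 */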

/**
 * @brief Gets the scheduler SMP context.
 *
 * @param context is the scheduler context of interest.
 *
 * @return Returns the scheduler context cast to Scheduler_SMP_Context.
 */
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

/**
 * @brief Initializes the scheduler SMP context.
 *
 * @param[out] self is the scheduler SMP context to initialize.
 */
static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

/**
 * @brief Gets the home scheduler node of the thread.
 *
 * @param thread is the thread of interest.
 *
 * @return Returns the home scheduler node cast to Scheduler_SMP_Node.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

/**
 * @brief Gets the own scheduler node of the thread, which is its home
 *   scheduler node.
 *
 * @param thread is the thread of interest.
 *
 * @return Returns the own scheduler node cast to Scheduler_SMP_Node.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

/**
 * @brief Casts the scheduler node to a scheduler SMP node.
 *
 * @param node is the scheduler node of interest.
 *
 * @return Returns the scheduler node cast to Scheduler_SMP_Node.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

/**
 * @brief Gets the state of the scheduler SMP node.
 *
 * @param node is the scheduler node of interest.
 *
 * @return Returns the state of the scheduler SMP node.
 */
static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

/**
 * @brief Gets the priority of the scheduler SMP node.
 *
 * @param node is the scheduler node of interest.
 *
 * @return Returns the priority of the scheduler SMP node.
 */
static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

/**
 * @brief Initializes the scheduler SMP node.
 *
 * The node state is set to blocked.
 *
 * @param scheduler is the scheduler of the node.
 * @param[out] node is the scheduler SMP node to initialize.
 * @param thread is the thread of the node.
 * @param priority is the initial priority of the node.
 */
static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

/**
 * @brief Updates the priority of the scheduler SMP node.
 *
 * @param[in, out] node is the scheduler SMP node to update.
 * @param new_priority is the new priority of the node.
 */
static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

/**
 * @brief Changes the state of the scheduler SMP node.
 *
 * @param[in, out] node is the scheduler node to change the state.
 * @param new_state is the new state of the node.
 */
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node          *node,
  Scheduler_SMP_Node_state new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

/**
 * @brief Checks if the processor is owned by the scheduler context.
 *
 * @param context is the scheduler context of interest.
 * @param cpu is the processor of interest.
 *
 * @retval true The processor is owned by the scheduler context.
 * @retval false Otherwise.
 */
static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

/**
 * @brief Removes the ask for help request of the thread from the processor.
 *
 * @param[in, out] thread is the thread of interest.
 * @param[in, out] cpu is the processor from which the request is removed.
 */
void _Scheduler_SMP_Remove_ask_for_help_from_processor(
  Thread_Control  *thread,
  Per_CPU_Control *cpu
);

/**
 * @brief Cancels the ask for help request of the thread, if any.
 *
 * The caller must be the owner of the thread's scheduler lock.
 *
 * @param[in, out] thread is the thread of interest.
 */
static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
  Per_CPU_Control *cpu;

  _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
  cpu = thread->Scheduler.ask_for_help_cpu;

  if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
    _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
  }
}

/**
 * @brief Requests to ask for help for the thread.
 *
 * The thread is appended to the chain of threads in need for help of the
 * current processor and a thread dispatch is requested on it.
 *
 * @param[in, out] thread is the thread in need for help.
 */
static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  cpu_self = _Per_CPU_Get();

  _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
  thread->Scheduler.ask_for_help_cpu = cpu_self;
  cpu_self->dispatch_necessary = true;

  _Per_CPU_Acquire( cpu_self, &lock_context );
  _Chain_Append_unprotected(
    &cpu_self->Threads_in_need_for_help,
    &thread->Scheduler.Help_node
  );
  _Per_CPU_Release( cpu_self, &lock_context );
}

/**
 * @brief This enumeration defines the actions which may be result from
 *   _Scheduler_SMP_Try_to_schedule().
 */
typedef enum {
  SCHEDULER_SMP_DO_SCHEDULE,
  SCHEDULER_SMP_DO_NOT_SCHEDULE
} Scheduler_SMP_Action;

/**
 * @brief Tries to schedule the scheduler node.
 *
 * If the owner of the node is ready, then it is scheduled.  Otherwise, if
 * the node is sufficiently sticky, an idle thread is attached to the node,
 * else the node is blocked.
 *
 * @param[in, out] node is the scheduler node of interest.
 * @param get_idle_node is the handler to get an idle node.
 * @param arg is the argument passed to the get idle node handler.
 *
 * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
 * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall not be scheduled and
 *   was blocked.
 */
static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
  Scheduler_Node         *node,
  Scheduler_Get_idle_node get_idle_node,
  void                   *arg
)
{
  ISR_lock_Context       lock_context;
  Thread_Control        *owner;
  Thread_Scheduler_state owner_state;
  int                    owner_sticky_level;

  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );
  owner_state = owner->Scheduler.state;
  owner_sticky_level = node->sticky_level;

  if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
    _Scheduler_SMP_Cancel_ask_for_help( owner );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( owner, &lock_context );
    return SCHEDULER_SMP_DO_SCHEDULE;
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );

  if (
    ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
    owner_sticky_level == 0
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    return SCHEDULER_SMP_DO_NOT_SCHEDULE;
  }

  (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );

  return SCHEDULER_SMP_DO_SCHEDULE;
}

/**
 * @brief Allocates the processor to the scheduled node, trying to avoid
 *   unnecessary thread migrations.
 *
 * @param context is the scheduler context.
 * @param[in, out] scheduled is the scheduled node which gets the processor.
 * @param[in, out] cpu is the processor to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control  *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  _Assert( _ISR_Get_level() != 0 );

  if ( cpu == scheduled_cpu ) {
    _Thread_Set_CPU( scheduled_thread, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );

    return;
  }

  if (
    _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
    _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
  ) {
    Thread_Control *heir = scheduled_cpu->heir;
    _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
    _Thread_Set_CPU( heir, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, heir );

    return;
  }

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

/**
 * @brief Allocates exactly the processor to the scheduled node.
 *
 * @param context This parameter is unused.
 * @param[in, out] scheduled is the scheduled node which gets the processor.
 * @param[in, out] cpu is the processor to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control  *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

/**
 * @brief Changes the state of the node to scheduled and allocates the
 *   processor using the allocation handler.
 *
 * @param context is the scheduler context.
 * @param[in, out] scheduled is the node which gets the processor.
 * @param[in, out] cpu is the processor to allocate.
 * @param allocate_processor is the processor allocation handler.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context               *context,
  Scheduler_Node                  *scheduled,
  Per_CPU_Control                 *cpu,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, cpu );
}

/**
 * @brief Preempts the victim node in favour of the scheduled node.
 *
 * The victim node is set to the ready state and its owner may ask for help
 * if necessary.  The processor of the victim is allocated to the scheduled
 * node.
 *
 * @param context is the scheduler context.
 * @param[in, out] scheduled is the node which gets the processor.
 * @param[in, out] victim is the node which loses the processor.
 * @param victim_idle is the idle thread of the victim node, if any.
 * @param allocate_processor is the processor allocation handler.
 */
static inline void _Scheduler_SMP_Preempt(
  Scheduler_Context               *context,
  Scheduler_Node                  *scheduled,
  Scheduler_Node                  *victim,
  Thread_Control                  *victim_idle,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control  *victim_owner;
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  victim_owner = _Scheduler_Node_get_owner( victim );
  _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );

  if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
    if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
      _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );

      if ( victim_owner->Scheduler.helping_nodes > 0 ) {
        _Scheduler_SMP_Request_ask_for_help( victim_owner );
      }
    }

    cpu = _Thread_Get_CPU( victim_owner );
  } else {
    cpu = _Thread_Get_CPU( victim_idle );
  }

  _Thread_Scheduler_release_critical( victim_owner, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    cpu,
    allocate_processor
  );
}

/**
 * @brief Gets the lowest priority scheduled node, which is the last node of
 *   the chain of scheduled nodes.
 *
 * @param context is the scheduler context.
 * @param filter This parameter is unused.
 *
 * @return Returns the lowest priority scheduled node.
 */
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *lowest_scheduled;

  (void) filter;

  self = _Scheduler_SMP_Get_self( context );

  _Assert( !_Chain_Is_empty( &self->Scheduled ) );
  lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );

  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) ==
      _Chain_Tail( &self->Scheduled )
  );

  return lowest_scheduled;
}

/**
 * @brief Moves the lowest priority scheduled node to the set of ready nodes
 *   and tries to schedule the node in its place.
 *
 * @param context is the scheduler context.
 * @param[in, out] node is the node to enqueue.
 * @param priority is the insert priority of the node.
 * @param[in, out] lowest_scheduled is the lowest priority scheduled node.
 * @param insert_scheduled is the handler to insert into the scheduled nodes.
 * @param move_from_scheduled_to_ready is the handler to move a node from the
 *   scheduled nodes to the ready nodes.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context               *context,
  Scheduler_Node                  *node,
  Priority_Control                 priority,
  Scheduler_Node                  *lowest_scheduled,
  Scheduler_SMP_Insert             insert_scheduled,
  Scheduler_SMP_Move               move_from_scheduled_to_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node,
  Scheduler_Release_idle_node      release_idle_node
)
{
  Thread_Control      *lowest_scheduled_idle;
  Scheduler_SMP_Action action;

  lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
    lowest_scheduled,
    release_idle_node,
    context
  );

  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

  action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );

  if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      lowest_scheduled_idle,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
  } else {
    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );

    if ( lowest_scheduled_idle != NULL ) {
      (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
    }

    ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
  }
}

/**
 * @brief Enqueues the node with the given insert priority.
 *
 * If the node has a higher priority than the lowest priority scheduled node,
 * then the lowest priority scheduled node is preempted, otherwise the node
 * is inserted into the set of ready nodes.
 *
 * @param context is the scheduler context.
 * @param[in, out] node is the node to enqueue.
 * @param insert_priority is the insert priority of the node.
 * @param order is the order function to compare nodes.
 * @param insert_ready is the handler to insert into the ready nodes.
 * @param insert_scheduled is the handler to insert into the scheduled nodes.
 * @param move_from_scheduled_to_ready is the handler to move a node from the
 *   scheduled nodes to the ready nodes.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param get_lowest_scheduled is the handler to get the lowest priority
 *   scheduled node.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 *
 * @retval true The thread needs help.
 * @retval false Otherwise.
 */
static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                 *context,
  Scheduler_Node                    *node,
  Priority_Control                   insert_priority,
  Chain_Node_order                   order,
  Scheduler_SMP_Insert               insert_ready,
  Scheduler_SMP_Insert               insert_scheduled,
  Scheduler_SMP_Move                 move_from_scheduled_to_ready,
  Scheduler_SMP_Move                 move_from_ready_to_scheduled,
  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor   allocate_processor,
  Scheduler_Get_idle_node            get_idle_node,
  Scheduler_Release_idle_node        release_idle_node
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if (
    ( *order )(
      &insert_priority,
      &node->Node.Chain,
      &lowest_scheduled->Node.Chain
    )
  ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node,
      release_idle_node
    );
    needs_help = false;
  } else {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}
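
/*
 * For illustration, a concrete scheduler typically wraps this generic
 * enqueue operation and supplies its ready-queue handlers as callbacks.  A
 * sketch with hypothetical helper names (these helpers are not part of this
 * header):
 *
 *   static bool _My_SMP_Enqueue(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Priority_Control   insert_priority
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue(
 *       context,
 *       node,
 *       insert_priority,
 *       _Scheduler_SMP_Priority_less_equal,
 *       _My_SMP_Insert_ready,
 *       _Scheduler_SMP_Insert_scheduled,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy,
 *       _My_SMP_Get_idle_node,
 *       _My_SMP_Release_idle_node
 *     );
 *   }
 */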

/**
 * @brief Enqueues the scheduled node with the given insert priority.
 *
 * The node either remains scheduled or is preempted by the highest priority
 * ready node.
 *
 * @param context is the scheduler context.
 * @param[in, out] node is the scheduled node to enqueue.
 * @param insert_priority is the insert priority of the node.
 * @param order is the order function to compare nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param insert_ready is the handler to insert into the ready nodes.
 * @param insert_scheduled is the handler to insert into the scheduled nodes.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context               *context,
  Scheduler_Node                  *const node,
  Priority_Control                 insert_priority,
  Chain_Node_order                 order,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Insert             insert_ready,
  Scheduler_SMP_Insert             insert_scheduled,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node,
  Scheduler_Release_idle_node      release_idle_node
)
{
  Thread_Control *node_idle;

  node_idle = _Scheduler_Release_idle_thread_if_necessary(
    node,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node      *highest_ready;
    Scheduler_SMP_Action action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * If the node is sticky and has a priority higher than or equal to the
     * highest priority ready node, then it remains scheduled.
     */
    if (
      node->sticky_level > 0 && ( *order )(
        &insert_priority,
        &node->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      if ( node_idle != NULL ) {
        Thread_Control  *owner;
        ISR_lock_Context lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          Per_CPU_Control *cpu;

          _Scheduler_SMP_Cancel_ask_for_help( owner );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
          cpu = _Thread_Get_CPU( node_idle );
          _Thread_Set_CPU( owner, cpu );
          _Thread_Scheduler_release_critical( owner, &lock_context );
          _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
        } else {
          Thread_Control *new_idle;

          _Thread_Scheduler_release_critical( owner, &lock_context );
          new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
          _Assert_Unused_variable_equals( new_idle, node_idle );
        }
      }

      ( *insert_scheduled )( context, node, insert_priority );

      return;
    }

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        node_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      ( *insert_ready )( context, node, insert_priority );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Extracts the node from the chain of scheduled nodes.
 *
 * @param context This parameter is unused.
 * @param[in, out] node is the node to extract.
 */
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

/**
 * @brief Blocks the victim node and schedules the highest priority ready
 *   node on the processor of the victim.
 *
 * @param context is the scheduler context.
 * @param[in, out] victim is the node which gives up its processor.
 * @param[in, out] cpu is the processor to allocate.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 */
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context               *context,
  Scheduler_Node                  *victim,
  Per_CPU_Control                 *cpu,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node
)
{
  Scheduler_SMP_Action action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
  ( *extract_from_scheduled )( context, victim );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Sets the victim node to ready and schedules the highest priority
 *   ready node in its place.
 *
 * @param context is the scheduler context.
 * @param[in, out] victim is the node which gives up its processor.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context               *context,
  Scheduler_Node                  *victim,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node,
  Scheduler_Release_idle_node      release_idle_node
)
{
  Thread_Control      *victim_idle;
  Scheduler_SMP_Action action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
  victim_idle = _Scheduler_Release_idle_thread_if_necessary(
    victim,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        victim_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Blocks the thread.
 *
 * The sticky level of the node is decremented.  If the node is scheduled
 * and still sticky, an idle thread is attached to it, otherwise the highest
 * priority ready node is scheduled in its place.
 *
 * @param context is the scheduler context.
 * @param[in, out] thread is the thread to block.
 * @param[in, out] node is the scheduler node of the thread.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node
)
{
  int                      sticky_level;
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;
  Per_CPU_Control         *cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  _Scheduler_SMP_Cancel_ask_for_help( thread );
  cpu = _Thread_Get_CPU( thread );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
    if (
      node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
      _Scheduler_Node_get_idle( node ) == NULL
    ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
      _Thread_Set_CPU( idle, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
    }

    return;
  }

  _Assert( _Scheduler_Node_get_user( node ) == thread );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }
}

/**
 * @brief Unblocks the thread.
 *
 * The sticky level of the node is incremented.  If the node is already
 * scheduled, the attached idle thread is discarded, otherwise the node is
 * enqueued or an ask for help request is issued.
 *
 * @param context is the scheduler context.
 * @param[in, out] thread is the thread to unblock.
 * @param[in, out] node is the scheduler node of the thread.
 * @param update is the handler to update the priority of the node.
 * @param enqueue is the handler to enqueue the node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_Release_idle_node release_idle_node
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         priority;

  _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
    _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Discard_idle_thread(
      thread,
      node,
      release_idle_node,
      context
    );

    return;
  }

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );

  priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( priority );

  if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
    ( *update )( context, node, priority );
  }

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Priority_Control insert_priority;
    bool             needs_help;

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    needs_help = ( *enqueue )( context, node, insert_priority );

    if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
      _Scheduler_SMP_Request_ask_for_help( thread );
    }
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_READY );
    _Assert( node->sticky_level > 0 );
    _Assert( node->idle == NULL );
    _Scheduler_SMP_Request_ask_for_help( thread );
  }
}

/**
 * @brief Updates the priority of the node and its position in the set of
 *   scheduled or ready nodes.
 *
 * @param context is the scheduler context.
 * @param thread is the thread of the node.
 * @param[in, out] node is the scheduler node to update.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param update is the handler to update the priority of the node.
 * @param enqueue is the handler to enqueue the node.
 * @param enqueue_scheduled is the handler to enqueue a scheduled node.
 * @param ask_for_help is the handler to perform the ask for help operation.
 */
static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context              *context,
  Thread_Control                 *thread,
  Scheduler_Node                 *node,
  Scheduler_SMP_Extract           extract_from_scheduled,
  Scheduler_SMP_Extract           extract_from_ready,
  Scheduler_SMP_Update            update,
  Scheduler_SMP_Enqueue           enqueue,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
  Scheduler_SMP_Ask_for_help      ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

/**
 * @brief Yields the processor of the node.
 *
 * The node is re-enqueued with an append insert priority so that nodes of
 * equal priority get a chance to execute.
 *
 * @param context is the scheduler context.
 * @param thread is the thread which yields.
 * @param[in, out] node is the scheduler node of the thread.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param enqueue is the handler to enqueue the node.
 * @param enqueue_scheduled is the handler to enqueue a scheduled node.
 */
static inline void _Scheduler_SMP_Yield(
  Scheduler_Context              *context,
  Thread_Control                 *thread,
  Scheduler_Node                 *node,
  Scheduler_SMP_Extract           extract_from_scheduled,
  Scheduler_SMP_Extract           extract_from_ready,
  Scheduler_SMP_Enqueue           enqueue,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    (void) ( *enqueue )( context, node, insert_priority );
  }
}

/**
 * @brief Inserts the node into the chain of scheduled nodes, keeping the
 *   chain ordered by priority.
 *
 * @param context is the scheduler context.
 * @param[in, out] node_to_insert is the node to insert.
 * @param priority_to_insert is the insert priority of the node.
 */
static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

/**
 * @brief Asks for help for the thread.
 *
 * If the thread is ready and its node has a higher priority than the lowest
 * priority scheduled node, then the lowest priority scheduled node is
 * preempted, otherwise the node is inserted into the set of ready nodes.
 *
 * @param context is the scheduler context.
 * @param[in, out] thread is the thread in need for help.
 * @param[in, out] node is the scheduler node of the thread.
 * @param order is the order function to compare nodes.
 * @param insert_ready is the handler to insert into the ready nodes.
 * @param insert_scheduled is the handler to insert into the scheduled nodes.
 * @param move_from_scheduled_to_ready is the handler to move a node from the
 *   scheduled nodes to the ready nodes.
 * @param get_lowest_scheduled is the handler to get the lowest priority
 *   scheduled node.
 * @param allocate_processor is the processor allocation handler.
 * @param release_idle_node is the handler to release an idle node.
 *
 * @retval true The help request was carried out or is no longer necessary.
 * @retval false Otherwise.
 */
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                 *context,
  Thread_Control                    *thread,
  Scheduler_Node                    *node,
  Chain_Node_order                   order,
  Scheduler_SMP_Insert               insert_ready,
  Scheduler_SMP_Insert               insert_scheduled,
  Scheduler_SMP_Move                 move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor   allocate_processor,
  Scheduler_Release_idle_node        release_idle_node
)
{
  Scheduler_Node  *lowest_scheduled;
  ISR_lock_Context lock_context;
  bool             success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * The thread is pinned to a processor.  There is nothing to do here,
     * report success.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if (
        ( *order )(
          &insert_priority,
          &node->Node.Chain,
          &lowest_scheduled->Node.Chain
        )
      ) {
        Thread_Control *lowest_scheduled_idle;

        _Scheduler_SMP_Cancel_ask_for_help( thread );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
          lowest_scheduled,
          release_idle_node,
          context
        );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          lowest_scheduled_idle,
          allocate_processor
        );

        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
        ( *insert_scheduled )( context, node, insert_priority );

        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Cancel_ask_for_help( thread );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      _Scheduler_Discard_idle_thread(
        thread,
        node,
        release_idle_node,
        context
      );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

/**
 * @brief Reconsiders the help request of the thread.
 *
 * If the thread is scheduled elsewhere and the node is ready with a sticky
 * level of one, then the node is blocked and extracted from the set of
 * ready nodes.
 *
 * @param context is the scheduler context.
 * @param[in, out] thread is the thread of interest.
 * @param[in, out] node is the scheduler node of the thread.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 */
static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context    *context,
  Thread_Control       *thread,
  Scheduler_Node       *node,
  Scheduler_SMP_Extract extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

/**
 * @brief Withdraws the node from the thread.
 *
 * The node is blocked and, if it was scheduled, the highest priority ready
 * node is scheduled in its place.
 *
 * @param context is the scheduler context.
 * @param[in, out] thread is the thread of interest.
 * @param[in, out] node is the scheduler node to withdraw.
 * @param next_state is the next thread scheduler state if the node was
 *   scheduled.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 */
static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Thread_Scheduler_state           next_state,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *cpu;

    _Assert( thread == _Scheduler_Node_get_user( node ) );
    cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Assert( _Scheduler_Node_get_user( node ) == thread );
    _Assert( _Scheduler_Node_get_idle( node ) == NULL );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

/**
 * @brief Makes the node sticky.
 *
 * If the node is blocked, then it is enqueued, updating its priority if
 * necessary.
 *
 * @param scheduler is the scheduler of the node.
 * @param[in, out] the_thread is the thread of the node.
 * @param[in, out] node is the scheduler node to make sticky.
 * @param update is the handler to update the priority of the node.
 * @param enqueue is the handler to enqueue the node.
 */
static inline void _Scheduler_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Scheduler_SMP_Update     update,
  Scheduler_SMP_Enqueue    enqueue
)
{
  Scheduler_SMP_Node_state node_state;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Scheduler_Context *context;
    Priority_Control   insert_priority;
    Priority_Control   priority;

    context = _Scheduler_Get_context( scheduler );
    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    (void) ( *enqueue )( context, node, insert_priority );
  }
}

/**
 * @brief Cleans the sticky property from the node.
 *
 * If the node is scheduled with an attached idle thread, then the idle
 * thread is released and the highest priority ready node is scheduled in
 * its place.
 *
 * @param scheduler is the scheduler of the node.
 * @param[in, out] the_thread is the thread of the node.
 * @param[in, out] node is the scheduler node to clean.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Clean_sticky(
  const Scheduler_Control         *scheduler,
  Thread_Control                  *the_thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node,
  Scheduler_Release_idle_node      release_idle_node
)
{
  Scheduler_SMP_Node_state node_state;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Thread_Control *idle;

    idle = _Scheduler_Node_get_idle( node );

    if ( idle != NULL ) {
      Scheduler_Context *context;

      context = _Scheduler_Get_context( scheduler );

      _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        _Thread_Get_CPU( idle ),
        extract_from_scheduled,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor,
        get_idle_node
      );
    }
  }
}

/**
 * @brief Starts the idle thread on the processor.
 *
 * @param context is the scheduler context.
 * @param[in, out] idle is the idle thread to start.
 * @param[in, out] cpu is the processor of the idle thread.
 * @param register_idle is the handler to register the idle thread.
 */
static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context          *context,
  Thread_Control             *idle,
  Per_CPU_Control            *cpu,
  Scheduler_SMP_Register_idle register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
}

/**
 * @brief Adds the processor of the idle thread to the scheduler instance.
 *
 * @param context is the scheduler context.
 * @param[in, out] idle is the idle thread of the processor to add.
 * @param has_ready is the handler to check if the scheduler instance has
 *   ready nodes.
 * @param enqueue_scheduled is the handler to enqueue a scheduled node.
 * @param register_idle is the handler to register the idle thread.
 */
static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context              *context,
  Thread_Control                 *idle,
  Scheduler_SMP_Has_ready         has_ready,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
  Scheduler_SMP_Register_idle     register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}

/**
 * @brief Removes the processor from the scheduler instance.
 *
 * The node using the processor is preempted by an idle node.
 *
 * @param context is the scheduler context.
 * @param[in, out] cpu is the processor to remove.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param enqueue is the handler to enqueue the node.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 *
 * @return Returns the idle thread of the removed processor.
 */
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context          *context,
  Per_CPU_Control            *cpu,
  Scheduler_SMP_Extract       extract_from_scheduled,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_Get_idle_node     get_idle_node,
  Scheduler_Release_idle_node release_idle_node
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  ( *extract_from_scheduled )( &self->Base, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Thread_Control  *victim_idle;
    Scheduler_Node  *idle_node;
    Priority_Control insert_priority;

    victim_idle = _Scheduler_Release_idle_thread_if_necessary(
      victim_node,
      release_idle_node,
      &self->Base
    );
    idle_node = ( *get_idle_node )( &self->Base );
    idle = _Scheduler_Node_get_owner( idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      victim_idle,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Assert( !_Chain_Is_empty( &self->Scheduled ) );
    insert_priority = _Scheduler_SMP_Node_priority( victim_node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue )( &self->Base, victim_node, insert_priority );
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
  }

  return idle;
}

/**
 * @brief Sets the processor affinity of the node.
 *
 * If the node is scheduled or ready, then it is extracted, the affinity is
 * set, and the node is enqueued again, otherwise the affinity is simply
 * set.
 *
 * @param context is the scheduler context.
 * @param thread is the thread of the node.
 * @param[in, out] node is the scheduler node of interest.
 * @param arg is the argument passed to the set affinity handler.
 * @param set_affinity is the handler to set the affinity of the node.
 * @param extract_from_scheduled is the handler to extract from the scheduled
 *   nodes.
 * @param extract_from_ready is the handler to extract from the ready nodes.
 * @param get_highest_ready is the handler to get the highest priority ready
 *   node.
 * @param move_from_ready_to_scheduled is the handler to move a node from the
 *   ready nodes to the scheduled nodes.
 * @param enqueue is the handler to enqueue the node.
 * @param allocate_processor is the processor allocation handler.
 * @param get_idle_node is the handler to get an idle node.
 * @param release_idle_node is the handler to release an idle node.
 */
static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  void                            *arg,
  Scheduler_SMP_Set_affinity       set_affinity,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue            enqueue,
  Scheduler_SMP_Allocate_processor allocate_processor,
  Scheduler_Get_idle_node          get_idle_node,
  Scheduler_Release_idle_node      release_idle_node
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node,
      release_idle_node
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    ( *set_affinity )( context, node, arg );
  }
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */