File indexing completed on 2025-05-11 08:24:13
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039 #ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
0040 #define _RTEMS_SCORE_SCHEDULERIMPL_H
0041
0042 #include <rtems/score/scheduler.h>
0043 #include <rtems/score/assert.h>
0044 #include <rtems/score/priorityimpl.h>
0045 #include <rtems/score/smpimpl.h>
0046 #include <rtems/score/status.h>
0047 #include <rtems/score/threadimpl.h>
0048
0049 #ifdef __cplusplus
0050 extern "C" {
0051 #endif
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
/**
 * @brief Initializes the scheduler to the policy chosen by the user.
 *
 * This routine initializes the scheduler to the policy chosen by the user
 * through confdefs, or the default policy if none was specified.
 */
void _Scheduler_Handler_initialization( void );
0096
0097
0098
0099
0100
0101
0102
0103
0104 static inline Scheduler_Context *_Scheduler_Get_context(
0105 const Scheduler_Control *scheduler
0106 )
0107 {
0108 return scheduler->context;
0109 }
0110
0111
0112
0113
0114
0115
0116
0117
/**
 * @brief Gets the scheduler for the cpu.
 *
 * @param cpu The cpu control to get the scheduler of.
 *
 * @return The scheduler for the cpu.
 */
static inline const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  /* In uniprocessor configurations there is exactly one scheduler. */
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}
0129
0130
0131
0132
0133
0134
0135
0136
0137
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param scheduler The scheduler instance.
 * @param lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
static inline void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  /* No per-scheduler lock is necessary on uniprocessor configurations. */
  (void) scheduler;
  (void) lock_context;
#endif
}
0153
0154
0155
0156
0157
0158
0159
0160
0161
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param scheduler The scheduler instance.
 * @param lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
static inline void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  /* No per-scheduler lock is necessary on uniprocessor configurations. */
  (void) scheduler;
  (void) lock_context;
#endif
}
0177
#if defined(RTEMS_SMP)
/**
 * @brief Indicates whether the thread non-preempt mode is supported by the
 * scheduler.
 *
 * @param scheduler The scheduler instance.
 *
 * @return True if the non-preempt mode for threads is supported by the
 *   scheduler, otherwise false.
 */
static inline bool _Scheduler_Is_non_preempt_mode_supported(
  const Scheduler_Control *scheduler
)
{
  return scheduler->is_non_preempt_mode_supported;
}
#endif
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param the_thread The thread which state changed previously.
 */
static inline void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context lock_context;

  /* The scheduling decision is made by the home scheduler of the thread. */
  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
0231
0232
0233
0234
0235
0236
0237
0238
0239
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param the_thread The yielding thread.
 */
static inline void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
/**
 * @brief Blocks the thread with respect to the scheduler.
 *
 * This routine removes the thread from the scheduling decision for the
 * scheduler. The primary task is to prevent the thread from being scheduled,
 * e.g. during a blocking operation.
 *
 * @param[in, out] the_thread The thread to block.
 */
static inline void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node *node;
  const Chain_Node *tail;
  Scheduler_Node *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /*
   * The first scheduler node is the node of the home scheduler; this one
   * gets the block operation.
   */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  /* All further scheduler nodes of the thread are withdrawn. */
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
/**
 * @brief Unblocks the thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in, out] the_thread The thread to unblock.
 */
static inline void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context lock_context;

#if defined(RTEMS_SMP)
  /* The first scheduler node belongs to the home scheduler of the thread. */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
/**
 * @brief Propagates a priority change of the thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state. In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.  On SMP configurations,
 * the update is carried out for every scheduler node of the thread.
 *
 * @param[in, out] the_thread The thread to update the priority of.
 */
static inline void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /* The thread has at least one scheduler node, so a do/while is fine. */
  do {
    Scheduler_Node *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416 static inline Priority_Control _Scheduler_Map_priority(
0417 const Scheduler_Control *scheduler,
0418 Priority_Control priority
0419 )
0420 {
0421 return ( *scheduler->Operations.map_priority )( scheduler, priority );
0422 }
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432 static inline Priority_Control _Scheduler_Unmap_priority(
0433 const Scheduler_Control *scheduler,
0434 Priority_Control priority
0435 )
0436 {
0437 return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
0438 }
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453 static inline void _Scheduler_Node_initialize(
0454 const Scheduler_Control *scheduler,
0455 Scheduler_Node *node,
0456 Thread_Control *the_thread,
0457 Priority_Control priority
0458 )
0459 {
0460 ( *scheduler->Operations.node_initialize )(
0461 scheduler,
0462 node,
0463 the_thread,
0464 priority
0465 );
0466 }
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477 static inline void _Scheduler_Node_destroy(
0478 const Scheduler_Control *scheduler,
0479 Scheduler_Node *node
0480 )
0481 {
0482 ( *scheduler->Operations.node_destroy )( scheduler, node );
0483 }
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
/**
 * @brief Releases a job of the thread with respect to the scheduler.
 *
 * @param the_thread The thread.
 * @param priority_node The priority node of the job.
 * @param deadline The deadline in watchdog ticks since boot.
 * @param queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
static inline void _Scheduler_Release_job(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  uint64_t deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}
0512
0513
0514
0515
0516
0517
0518
0519
0520
/**
 * @brief Cancels a job of the thread with respect to the scheduler.
 *
 * @param the_thread The thread.
 * @param priority_node The priority node of the job.
 * @param queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
static inline void _Scheduler_Cancel_job(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547 static inline void _Scheduler_Start_idle(
0548 const Scheduler_Control *scheduler,
0549 Thread_Control *the_thread,
0550 Per_CPU_Control *cpu
0551 )
0552 {
0553 ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
0554 }
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
/**
 * @brief Checks if the scheduler of the cpu with the given index is equal
 * to the given scheduler.
 *
 * @param scheduler The scheduler for the comparison.
 * @param cpu_index The index of the cpu for the comparison.
 *
 * @retval true The scheduler of the cpu is the given @a scheduler.
 * @retval false The scheduler of the cpu is not the given @a scheduler
 *   (only possible on SMP configurations).
 */
static inline bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  /* On uniprocessor configurations the one scheduler owns the one cpu. */
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
0586
0587
0588
0589
0590
0591
0592
0593
/**
 * @brief Gets the processors owned by the scheduler.
 *
 * @param scheduler The scheduler to get the processors of.
 *
 * @return The processor mask owned by the scheduler.
 */
static inline const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
/**
 * @brief Copies the thread processor affinity to the given cpu set.
 *
 * @param the_thread The thread to get the affinity of.
 * @param cpusetsize The size of @a cpuset in bytes.
 * @param[out] cpuset The cpu set to copy the affinity into.
 *
 * @return STATUS_SUCCESSFUL on success, otherwise an error status.
 */
Status_Control _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t cpusetsize,
  cpu_set_t *cpuset
);
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
/**
 * @brief Checks if the affinity is a subset of the online processors.
 *
 * Default implementation body for the set affinity scheduler operation.
 *
 * @param scheduler This parameter is unused.
 * @param the_thread This parameter is unused.
 * @param node This parameter is unused.
 * @param affinity The processor mask to check.
 *
 * @retval STATUS_SUCCESSFUL The affinity is a subset of the online
 *   processors.
 * @retval STATUS_INVALID_NUMBER The affinity is not a subset of the online
 *   processors.
 */
static inline Status_Control _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node,
  const Processor_mask *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;

  if ( !_Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() ) ) {
    return STATUS_INVALID_NUMBER;
  }

  return STATUS_SUCCESSFUL;
}
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
/**
 * @brief Sets the thread processor affinity.
 *
 * @param the_thread The thread to set the affinity of.
 * @param cpusetsize The size of @a cpuset in bytes.
 * @param cpuset The requested processor affinity.
 *
 * @return STATUS_SUCCESSFUL on success, otherwise an error status.
 */
Status_Control _Scheduler_Set_affinity(
  Thread_Control *the_thread,
  size_t cpusetsize,
  const cpu_set_t *cpuset
);
0670
0671
0672
0673
0674
0675
0676
0677
/**
 * @brief Gets the number of processors owned by the scheduler.
 *
 * @param scheduler The scheduler instance.
 *
 * @return The count of processors owned by the scheduler (always 1 on
 *   uniprocessor configurations).
 */
static inline uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}
0692
0693
0694
0695
0696
0697
0698
0699
0700 static inline Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
0701 {
0702 return _Objects_Build_id(
0703 OBJECTS_FAKE_OBJECTS_API,
0704 OBJECTS_FAKE_OBJECTS_SCHEDULERS,
0705 _Objects_Local_node,
0706 (uint16_t) ( scheduler_index + 1 )
0707 );
0708 }
0709
0710
0711
0712
0713
0714
0715
0716
0717 static inline uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
0718 {
0719 uint32_t minimum_id = _Scheduler_Build_id( 0 );
0720
0721 return id - minimum_id;
0722 }
0723
0724
0725
0726
0727
0728
0729
0730
0731 static inline const Scheduler_Control *_Scheduler_Get_by_id(
0732 Objects_Id id
0733 )
0734 {
0735 uint32_t index;
0736
0737 index = _Scheduler_Get_index_by_id( id );
0738
0739 if ( index >= _Scheduler_Count ) {
0740 return NULL;
0741 }
0742
0743 return &_Scheduler_Table[ index ];
0744 }
0745
0746
0747
0748
0749
0750
0751
0752
0753 static inline uint32_t _Scheduler_Get_index(
0754 const Scheduler_Control *scheduler
0755 )
0756 {
0757 return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
0758 }
0759
0760 #if defined(RTEMS_SMP)
0761
0762
0763
0764
0765
0766
0767
0768
0769
/**
 * @brief Gets an idle node from the scheduler.
 *
 * @param arg The handler specific argument.
 *
 * @return The idle node obtained from the scheduler.
 */
typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );
0771
0772
0773
0774
0775
0776
0777
0778
/**
 * @brief Releases an idle node back to the scheduler.
 *
 * @param node The idle node to release.
 * @param arg The handler specific argument.
 */
typedef void ( *Scheduler_Release_idle_node )(
  Scheduler_Node *node,
  void *arg
);
0783
0784
0785
0786
0787
0788
0789
/**
 * @brief Changes the scheduler state of the thread.
 *
 * @param[in, out] the_thread The thread to change the state of.
 * @param new_state The new scheduler state of the thread.
 */
static inline void _Scheduler_Thread_change_state(
  Thread_Control *the_thread,
  Thread_Scheduler_state new_state
)
{
  /*
   * The change is only legitimate while holding the thread scheduler lock,
   * for blocked threads, or before the system is up.
   */
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
0803
0804
0805
0806
0807
0808
0809
0810
0811
0812
/**
 * @brief Uses an idle thread for the scheduler node.
 *
 * @param[in, out] node The node to use an idle thread for.
 * @param get_idle_node The get idle node handler.
 * @param arg The argument for the get idle node handler.
 *
 * @return The idle thread used by the scheduler node.
 */
static inline Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Node *node,
  Scheduler_Get_idle_node get_idle_node,
  void *arg
)
{
  Scheduler_Node *idle_node;
  Thread_Control *idle;

  idle_node = ( *get_idle_node )( arg );
  idle = _Scheduler_Node_get_owner( idle_node );
  _Assert( idle->is_idle );
  _Scheduler_Node_set_idle_user( node, idle );

  return idle;
}
0829
0830
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840
/**
 * @brief Releases the idle thread used by the scheduler node.
 *
 * @param[in, out] node The node to release the idle thread of.
 * @param idle The idle thread to release.
 * @param release_idle_node The release idle node handler.
 * @param arg The argument for the release idle node handler.
 */
static inline void _Scheduler_Release_idle_thread(
  Scheduler_Node *node,
  const Thread_Control *idle,
  Scheduler_Release_idle_node release_idle_node,
  void *arg
)
{
  Thread_Control *owner;
  Scheduler_Node *idle_node;

  /* Restore the owner of the node as its user. */
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  node->idle = NULL;
  idle_node = _Thread_Scheduler_get_home_node( idle );
  ( *release_idle_node )( idle_node, arg );
}
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873 static inline Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
0874 Scheduler_Node *node,
0875 Scheduler_Release_idle_node release_idle_node,
0876 void *arg
0877 )
0878 {
0879 Thread_Control *idle;
0880
0881 idle = _Scheduler_Node_get_idle( node );
0882
0883 if ( idle != NULL ) {
0884 _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
0885 }
0886
0887 return idle;
0888 }
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
/**
 * @brief Discards the idle thread used by the scheduler node and makes the
 * thread the new heir on the processor of the idle thread.
 *
 * @param[in, out] the_thread The thread which takes over the processor.
 * @param[in, out] node The node to discard the idle thread of.
 * @param release_idle_node The release idle node handler.
 * @param arg The argument for the release idle node handler.
 */
static inline void _Scheduler_Discard_idle_thread(
  Thread_Control *the_thread,
  Scheduler_Node *node,
  Scheduler_Release_idle_node release_idle_node,
  void *arg
)
{
  Thread_Control *idle;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );

  /* The thread inherits the processor of the released idle thread. */
  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}
0918 #endif
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929
0930
/**
 * @brief Sets a new scheduler for the thread.
 *
 * The operation fails if the thread is enqueued on a thread queue, has
 * additional priority nodes, has additional scheduler nodes (SMP), or is
 * pinned to a processor (SMP).
 *
 * @param new_scheduler The scheduler to set for the thread.
 * @param[in, out] the_thread The thread to set a new scheduler for.
 * @param priority The initial thread priority in the new scheduler.
 *
 * @retval STATUS_SUCCESSFUL The scheduler was set.
 * @retval STATUS_RESOURCE_IN_USE The thread currently uses resources which
 *   prevent a scheduler change.
 * @retval STATUS_UNSATISFIED The new scheduler has no processors or rejects
 *   the thread processor affinity (SMP only).
 */
static inline Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control *the_thread,
  Priority_Control priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;
  const Scheduler_Control *old_scheduler;

#endif

#if defined(RTEMS_SCORE_THREAD_HAS_SCHEDULER_CHANGE_INHIBITORS)
  if ( the_thread->is_scheduler_change_inhibited ) {
    return STATUS_RESOURCE_IN_USE;
  }
#endif

  /* A thread enqueued on a thread queue must not change its scheduler. */
  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  /*
   * The thread must have exactly its real priority node, one wait node, and
   * must not be pinned, otherwise the change is rejected and the priority
   * node is re-inserted to restore the previous state.
   */
  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  /*
   * The new scheduler must own at least one processor and accept the thread
   * processor affinity, otherwise restore the previous state.
   */
  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || ( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      ) != STATUS_SUCCESSFUL
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  /* Uniprocessor configurations have exactly one scheduler node. */
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    /* Block a ready thread while its scheduler nodes are exchanged. */
    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    /* Replace the wait and scheduler node chains with the new node only. */
    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority(
      new_scheduler_node,
      priority,
      PRIORITY_GROUP_LAST
    );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  /* Same scheduler: only the priority changes. */
  _Scheduler_Node_set_priority(
    new_scheduler_node,
    priority,
    PRIORITY_GROUP_LAST
  );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1070
1071
1072
1073 #ifdef __cplusplus
1074 }
1075 #endif
1076
1077 #endif
1078