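/**
 * @file
 *
 * @brief This source file contains the implementation of the thread
 *   dispatching support, for example _Thread_Do_dispatch(),
 *   _Thread_Dispatch_direct(), and _Thread_Dispatch_enable().
 */
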
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/config.h>

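/*
 * Points to the thread whose floating point context is currently loaded on
 * the processor.  It is used by the deferred floating point context switch.
 */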
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
Thread_Control *_Thread_Allocated_fp;
#endif

CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );

#if defined(RTEMS_SMP)
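/*
 * If the executing thread is pinned to the current processor and preemption
 * is not yet disabled for it, then complete the pinning: block the thread if
 * it is ready, make the scheduler of the current processor its pinned
 * scheduler, pin the corresponding scheduler node, and unblock the thread on
 * the pinned scheduler.  Interrupts are temporarily enabled during this
 * operation.  The new interrupt level is returned.
 */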
static ISR_Level _Thread_Check_pinning(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
  unsigned int pin_level;

  pin_level = executing->Scheduler.pin_level;

  if (
    RTEMS_PREDICT_FALSE( pin_level != 0 )
      && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
  ) {
    ISR_lock_Context         state_lock_context;
    ISR_lock_Context         scheduler_lock_context;
    const Scheduler_Control *pinned_scheduler;
    Scheduler_Node          *pinned_node;
    const Scheduler_Control *home_scheduler;

    _ISR_Local_enable( level );

    executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;

    _Thread_State_acquire( executing, &state_lock_context );

    pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
    pinned_node = _Thread_Scheduler_get_node_by_index(
      executing,
      _Scheduler_Get_index( pinned_scheduler )
    );

    if ( _Thread_Is_ready( executing ) ) {
      _Scheduler_Block( executing );
    }

    home_scheduler = _Thread_Scheduler_get_home( executing );
    executing->Scheduler.pinned_scheduler = pinned_scheduler;

    if ( home_scheduler != pinned_scheduler ) {
      _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
      _Chain_Prepend_unprotected(
        &executing->Scheduler.Scheduler_nodes,
        &pinned_node->Thread.Scheduler_node.Chain
      );
    }

    _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );

    ( *pinned_scheduler->Operations.pin )(
      pinned_scheduler,
      executing,
      pinned_node,
      cpu_self
    );

    if ( _Thread_Is_ready( executing ) ) {
      ( *pinned_scheduler->Operations.unblock )(
        pinned_scheduler,
        executing,
        pinned_node
      );
    }

    _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );

    _Thread_State_release( executing, &state_lock_context );

    _ISR_Local_disable( level );
  }

  return level;
}

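/*
 * Ask the schedulers of the thread for help.  The scheduler nodes of the
 * thread are tried in chain order until one scheduler accepts the help
 * request.
 */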
static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *node;
  const Chain_Node *tail;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    bool                     success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  } while ( node != tail );
}

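/*
 * The executing thread can ask for help if it has at least one helping
 * scheduler node and is ready for execution.
 */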
static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
{
  return executing->Scheduler.helping_nodes > 0
    && _Thread_Is_ready( executing );
}
#endif

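/*
 * Carry out a deferred thread pinning if necessary and ask the schedulers
 * for help on behalf of all threads which registered themselves on the
 * current processor for help.  The new interrupt level is returned.
 */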
static ISR_Level _Thread_Preemption_intervention(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  level = _Thread_Check_pinning( executing, cpu_self, level );

  _Per_CPU_Acquire( cpu_self, &lock_context );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node     *node;
    Thread_Control *the_thread;

    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
    the_thread->Scheduler.ask_for_help_cpu = NULL;

    _Per_CPU_Release( cpu_self, &lock_context );

    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );

    _Per_CPU_Acquire( cpu_self, &lock_context );
  }

  _Per_CPU_Release( cpu_self, &lock_context );
#else
  (void) executing;
  (void) cpu_self;
#endif

  return level;
}

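/*
 * If the executing thread can ask for help, then let each of its schedulers
 * reconsider the help request.
 */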
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  if ( !_Thread_Can_ask_for_help( executing ) ) {
    return;
  }

  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.reconsider_help_request )(
      scheduler,
      executing,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  (void) executing;
#endif
}

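/*
 * Get the next post switch action of the thread.  Returns NULL if no action
 * is pending.
 */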
static Thread_Action *_Thread_Get_post_switch_action(
  Thread_Control *executing
)
{
  Chain_Control *chain = &executing->Post_switch_actions.Chain;

  return (Thread_Action *) _Chain_Get_unprotected( chain );
}

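/*
 * Run the pending post switch actions of the executing thread while holding
 * its state lock.
 */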
static void _Thread_Run_post_switch_actions( Thread_Control *executing )
{
  ISR_lock_Context lock_context;
  Thread_Action   *action;

  _Thread_State_acquire( executing, &lock_context );
  _Thread_Post_switch_cleanup( executing );
  action = _Thread_Get_post_switch_action( executing );

  while ( action != NULL ) {
    _Chain_Set_off_chain( &action->Node );
    ( *action->handler )( executing, action, &lock_context );
    action = _Thread_Get_post_switch_action( executing );
  }

  _Thread_State_release( executing, &lock_context );
}

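/*
 * Perform the thread dispatch.  The caller must be the owner of the thread
 * dispatch critical section with a disable level of exactly one and must
 * have disabled interrupts, passing in the previous interrupt level.
 */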
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
  if (
    !_ISR_Is_enabled( level )
#if defined(RTEMS_SMP) && CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
  }
#endif

  executing = cpu_self->executing;

  do {
    Thread_Control                     *heir;
    const Thread_CPU_budget_operations *cpu_budget_operations;

    level = _Thread_Preemption_intervention( executing, cpu_self, level );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     * When the heir and executing thread are the same, then the dispatch
     * was requested to carry out the post switch actions, for example to
     * dispatch signals.
     */
    if ( heir == executing ) {
      break;
    }

    /*
     * Since the heir and executing thread differ, a real context switch is
     * necessary.  Let the CPU budget operations of the heir observe the
     * context switch.
     */
    cpu_budget_operations = heir->CPU_budget.operations;

    if ( cpu_budget_operations != NULL ) {
      ( *cpu_budget_operations->at_context_switch )( heir );
    }

    _ISR_Local_enable( level );

#if !defined(RTEMS_SMP)
    _User_extensions_Thread_switch( executing, heir );
#endif
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );
#if defined(RTEMS_SMP)
    _User_extensions_Thread_switch( NULL, executing );
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from
     * the stack or non-volatile registers reflect the old execution
     * environment.
     */
    cpu_self = _Per_CPU_Get();

    _ISR_Local_disable( level );
  } while ( cpu_self->dispatch_necessary );

  /*
   * The thread dispatch is done.  Leave the thread dispatch critical
   * section and enable interrupts again before the post switch actions of
   * the executing thread run.
   */
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  _Thread_Run_post_switch_actions( executing );
}

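/*
 * Perform a thread dispatch right now.  A thread dispatch disable level
 * other than one is a fatal error.
 */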
void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self )
{
  ISR_Level level;

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
  }

  _ISR_Local_disable( level );
  _Thread_Do_dispatch( cpu_self, level );
}

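/*
 * Provide _Thread_Dispatch_direct_no_return() as an alias of
 * _Thread_Dispatch_direct() for callers which do not return after the
 * thread dispatch.
 */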
RTEMS_ALIAS( _Thread_Dispatch_direct ) void
_Thread_Dispatch_direct_no_return( Per_CPU_Control * );

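/*
 * Decrement the thread dispatch disable level.  If the level reaches zero
 * and a thread dispatch is necessary, then carry it out.
 */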
void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
{
  uint32_t disable_level = cpu_self->thread_dispatch_disable_level;

  if ( disable_level == 1 ) {
    ISR_Level level;

    _ISR_Local_disable( level );

    if (
      cpu_self->dispatch_necessary
#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
        || !_ISR_Is_enabled( level )
#endif
    ) {
      _Thread_Do_dispatch( cpu_self, level );
    } else {
      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );
      _ISR_Local_enable( level );
    }
  } else {
    _Assert( disable_level > 0 );
    cpu_self->thread_dispatch_disable_level = disable_level - 1;
  }
}
0399 }