/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreThread
 *
 * @brief This source file contains the definition of ::_Thread_Allocated_fp
 *   and ::_User_extensions_Switches_list and the implementation of
 *   _Thread_Dispatch_direct(), _Thread_Dispatch_enable(),
 *   and _Thread_Do_dispatch().
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/config.h>

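/*
 * With a deferred floating-point context switch, this points to the thread
 * whose floating-point context is currently loaded, if any.
 */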
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
Thread_Control *_Thread_Allocated_fp;
#endif

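/*
 * The chain of the installed user extensions which provide a thread switch
 * handler.  It is defined empty here and maintained by the user extension
 * management.
 */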
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );

#if defined(RTEMS_SMP)
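/*
 * Completes a deferred thread pinning.  If the executing thread is pinned
 * (pin_level != 0) and THREAD_PIN_PREEMPTION is not yet set for it, then set
 * this flag, make the scheduler of the current processor the pinned
 * scheduler of the thread, and let this scheduler pin the thread.  If the
 * pinned scheduler is not the home scheduler, then the corresponding
 * scheduler node moves to the front of the thread's scheduler nodes.
 * Interrupts are enabled temporarily while the scheduler state is updated.
 */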
static ISR_Level _Thread_Check_pinning(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
  unsigned int pin_level;

  pin_level = executing->Scheduler.pin_level;

  if (
    RTEMS_PREDICT_FALSE( pin_level != 0 )
      && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
  ) {
    ISR_lock_Context         state_lock_context;
    ISR_lock_Context         scheduler_lock_context;
    const Scheduler_Control *pinned_scheduler;
    Scheduler_Node          *pinned_node;
    const Scheduler_Control *home_scheduler;

    _ISR_Local_enable( level );

    executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;

    _Thread_State_acquire( executing, &state_lock_context );

    pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
    pinned_node = _Thread_Scheduler_get_node_by_index(
      executing,
      _Scheduler_Get_index( pinned_scheduler )
    );

    if ( _Thread_Is_ready( executing ) ) {
      _Scheduler_Block( executing );
    }

    home_scheduler = _Thread_Scheduler_get_home( executing );
    executing->Scheduler.pinned_scheduler = pinned_scheduler;

    if ( home_scheduler != pinned_scheduler ) {
      _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
      _Chain_Prepend_unprotected(
        &executing->Scheduler.Scheduler_nodes,
        &pinned_node->Thread.Scheduler_node.Chain
      );
    }

    _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );

    ( *pinned_scheduler->Operations.pin )(
      pinned_scheduler,
      executing,
      pinned_node,
      cpu_self
    );

    if ( _Thread_Is_ready( executing ) ) {
      ( *pinned_scheduler->Operations.unblock )(
        pinned_scheduler,
        executing,
        pinned_node
      );
    }

    _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );

    _Thread_State_release( executing, &state_lock_context );

    _ISR_Local_disable( level );
  }

  return level;
}

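/*
 * Asks the schedulers used by the thread for help, in the order of the
 * thread's scheduler nodes, until one scheduler grants the help request.
 */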
static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *node;
  const Chain_Node *tail;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    bool                     success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  } while ( node != tail );
}

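/*
 * A thread may ask for help only if it uses at least one helping scheduler
 * node and is ready to execute.
 */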
static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
{
  return executing->Scheduler.helping_nodes > 0
    && _Thread_Is_ready( executing );
}
#endif

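/*
 * Performs the preemption intervention before the heir is selected.  On SMP
 * configurations, this completes a deferred thread pinning and asks for help
 * on behalf of the threads in need for help registered on this processor.
 * On uniprocessor configurations, nothing needs to be done.
 */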
static ISR_Level _Thread_Preemption_intervention(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  level = _Thread_Check_pinning( executing, cpu_self, level );

  _Per_CPU_Acquire( cpu_self, &lock_context );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node     *node;
    Thread_Control *the_thread;

    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
    the_thread->Scheduler.ask_for_help_cpu = NULL;

    _Per_CPU_Release( cpu_self, &lock_context );

    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );

    _Per_CPU_Acquire( cpu_self, &lock_context );
  }

  _Per_CPU_Release( cpu_self, &lock_context );
#else
  (void) cpu_self;
#endif

  return level;
}

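/*
 * On SMP configurations, if the thread can ask for help, then each scheduler
 * used by the thread reconsiders the help request after the context switch.
 */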
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  if ( !_Thread_Can_ask_for_help( executing ) ) {
    return;
  }

  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.reconsider_help_request )(
      scheduler,
      executing,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  (void) executing;
#endif
}

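/*
 * Gets the next post-switch action of the thread, or NULL if no action is
 * pending.
 */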
static Thread_Action *_Thread_Get_post_switch_action(
  Thread_Control *executing
)
{
  Chain_Control *chain = &executing->Post_switch_actions.Chain;

  return (Thread_Action *) _Chain_Get_unprotected( chain );
}

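/*
 * Runs the post-switch cleanup and all pending post-switch actions of the
 * thread.  The action handlers are invoked with the thread state lock
 * acquired and receive its lock context.
 */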
static void _Thread_Run_post_switch_actions( Thread_Control *executing )
{
  ISR_lock_Context  lock_context;
  Thread_Action    *action;

  _Thread_State_acquire( executing, &lock_context );
  _Thread_Post_switch_cleanup( executing );
  action = _Thread_Get_post_switch_action( executing );

  while ( action != NULL ) {
    _Chain_Set_off_chain( &action->Node );
    ( *action->handler )( executing, action, &lock_context );
    action = _Thread_Get_post_switch_action( executing );
  }

  _Thread_State_release( executing, &lock_context );
}

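/*
 * Performs the thread dispatch.  Context switches are carried out in a loop
 * until no thread dispatch is necessary on this processor.  Afterwards,
 * thread dispatching is enabled again and the post-switch actions of the
 * executing thread run.
 */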
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
  if (
    !_ISR_Is_enabled( level )
#if defined(RTEMS_SMP) && CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
  }
#endif

  executing = cpu_self->executing;

  do {
    Thread_Control                     *heir;
    const Thread_CPU_budget_operations *cpu_budget_operations;

    level = _Thread_Preemption_intervention( executing, cpu_self, level );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     * If the heir and executing are the same, then there is no need to do a
     * context switch.  Proceed to run the post switch actions.  This is
     * normally done to dispatch signals.
     */
    if ( heir == executing ) {
      break;
    }

    /*
     * Since heir and executing are not the same, we need to do a real
     * context switch.
     */

    cpu_budget_operations = heir->CPU_budget.operations;

    if ( cpu_budget_operations != NULL ) {
      ( *cpu_budget_operations->at_context_switch )( heir );
    }

    _ISR_Local_enable( level );

#if !defined(RTEMS_SMP)
    _User_extensions_Thread_switch( executing, heir );
#endif
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );
#if defined(RTEMS_SMP)
    _User_extensions_Thread_switch( NULL, executing );
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _ISR_Local_disable( level );
  } while ( cpu_self->dispatch_necessary );

  /*
   * We are done with context switching.  Proceed to run the post switch
   * actions.
   */

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  _Thread_Run_post_switch_actions( executing );
}

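/*
 * Performs a thread dispatch if the thread dispatch disable level is exactly
 * one, otherwise a fatal error occurs.
 */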
void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self )
{
  ISR_Level level;

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
  }

  _ISR_Local_disable( level );
  _Thread_Do_dispatch( cpu_self, level );
}

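/*
 * Defines _Thread_Dispatch_direct_no_return() as an alias of
 * _Thread_Dispatch_direct().
 */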
RTEMS_ALIAS( _Thread_Dispatch_direct ) void
_Thread_Dispatch_direct_no_return( Per_CPU_Control * );

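/*
 * Decrements the thread dispatch disable level.  If the level reaches zero
 * and a thread dispatch is necessary (or, with a robust thread dispatch,
 * interrupts are disabled), then the thread dispatch is carried out.
 */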
void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
{
  uint32_t disable_level = cpu_self->thread_dispatch_disable_level;

  if ( disable_level == 1 ) {
    ISR_Level level;

    _ISR_Local_disable( level );

    if (
      cpu_self->dispatch_necessary
#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
        || !_ISR_Is_enabled( level )
#endif
    ) {
      _Thread_Do_dispatch( cpu_self, level );
    } else {
      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );
      _ISR_Local_enable( level );
    }
  } else {
    _Assert( disable_level > 0 );
    cpu_self->thread_dispatch_disable_level = disable_level - 1;
  }
}