/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreThread
 *
 * @brief This source file contains the implementation of _Thread_Free()
 *   and _Thread_Initialize().
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadimpl.h>
#include <rtems/score/freechainimpl.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/stackimpl.h>
#include <rtems/score/tls.h>
#include <rtems/score/userextimpl.h>
#include <rtems/score/watchdogimpl.h>

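/*
 * Releases all resources of the thread and returns the thread control block
 * to the object information.
 */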
void _Thread_Free(
  Thread_Information *information,
  Thread_Control     *the_thread
)
{
#if defined(RTEMS_SMP)
  Scheduler_Node *scheduler_node;
  size_t          scheduler_index;
#endif

  _User_extensions_Thread_delete( the_thread );
  _User_extensions_Destroy_iterators( the_thread );
  _ISR_lock_Destroy( &the_thread->Keys.Lock );

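  /*
   * Destroy the scheduler nodes of the thread.  In SMP configurations, there
   * is one scheduler node for each configured scheduler.
   */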
#if defined(RTEMS_SMP)
  scheduler_node = the_thread->Scheduler.nodes;
  scheduler_index = 0;

  while ( scheduler_index < _Scheduler_Count ) {
    _Scheduler_Node_destroy(
      &_Scheduler_Table[ scheduler_index ],
      scheduler_node
    );
    scheduler_node = (Scheduler_Node *)
      ( (uintptr_t) scheduler_node + _Scheduler_Node_size );
    ++scheduler_index;
  }
#else
  _Scheduler_Node_destroy(
    _Thread_Scheduler_get_home( the_thread ),
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif

  _ISR_lock_Destroy( &the_thread->Timer.Lock );

  /*
   *  The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
#endif

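  /*
   * Return the thread queue heads to the free chain of the thread
   * information.
   */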
  _Freechain_Push(
    &information->Thread_queue_heads.Free,
    the_thread->Wait.spare_heads
  );

  /*
   *  Free the stack area associated with this thread using the configured
   *  stack free handler.
   */
  ( *the_thread->Start.stack_free )( the_thread->Start.Initial_stack.area );

#if defined(RTEMS_SMP)
  _ISR_lock_Destroy( &the_thread->Scheduler.Lock );
  _ISR_lock_Destroy( &the_thread->Wait.Lock.Default );
  _SMP_lock_Stats_destroy( &the_thread->Potpourri_stats );
#endif

  _Thread_queue_Destroy( &the_thread->Join_queue );
  _Context_Destroy( the_thread, &the_thread->Registers );
  _Objects_Free( &information->Objects, &the_thread->Object );
}

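/*
 * Initializes the scheduler nodes, the real and current priority, and the
 * thread wait support according to the thread configuration.
 */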
static void _Thread_Initialize_scheduler_and_wait_nodes(
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  Scheduler_Node          *home_scheduler_node;
#if defined(RTEMS_SMP)
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  size_t                   scheduler_index;
#endif

#if defined(RTEMS_SMP)
  home_scheduler_node = NULL;
  scheduler_node = the_thread->Scheduler.nodes;
  scheduler = &_Scheduler_Table[ 0 ];
  scheduler_index = 0;

  /*
   * In SMP configurations, the thread has exactly one scheduler node for each
   * configured scheduler.  Initialize the scheduler nodes of each scheduler.
   * The application configuration ensures that we have at least one scheduler
   * configured.
   */

  _Assert( _Scheduler_Count >= 1 );

  do {
    Priority_Control priority;

    if ( scheduler == config->scheduler ) {
      priority = config->priority;
      home_scheduler_node = scheduler_node;
    } else {
      /*
       * Use the idle thread priority for the non-home scheduler instances by
       * default.
       */
      priority = _Scheduler_Map_priority(
        scheduler,
        scheduler->maximum_priority
      );
    }

    _Scheduler_Node_initialize(
      scheduler,
      scheduler_node,
      the_thread,
      priority
    );

    /*
     * Since the size of a scheduler node depends on the application
     * configuration, the _Scheduler_Node_size constant is used to get the next
     * scheduler node.  Using sizeof( Scheduler_Node ) would be wrong.
     */
    scheduler_node = (Scheduler_Node *)
      ( (uintptr_t) scheduler_node + _Scheduler_Node_size );
    ++scheduler;
    ++scheduler_index;
  } while ( scheduler_index < _Scheduler_Count );

  /*
   * The thread is initialized to use exactly one scheduler node which is
   * provided by its home scheduler.
   */
  _Assert( home_scheduler_node != NULL );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &home_scheduler_node->Thread.Wait_node
  );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &home_scheduler_node->Thread.Scheduler_node.Chain
  );
#else
  /*
   * In uniprocessor configurations, the thread has exactly one scheduler node.
   */
  home_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_initialize(
    config->scheduler,
    home_scheduler_node,
    the_thread,
    config->priority
  );
#endif

  /*
   * The current priority of the thread is initialized to exactly the real
   * priority of the thread.  During the lifetime of the thread, it may gain
   * more priority nodes, for example through locking protocols such as
   * priority inheritance or priority ceiling.
   */
  _Priority_Node_initialize( &the_thread->Real_priority, config->priority );
  _Priority_Initialize_one(
    &home_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

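  /*
   * In SMP configurations, the scheduler state of the thread was zero
   * initialized by the memset() in _Thread_Try_initialize().  The static
   * assertion below ensures that this value corresponds to the blocked
   * scheduler state.
   */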
#if defined(RTEMS_SMP)
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.home_scheduler = config->scheduler;
  _ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
  _ISR_lock_Initialize( &the_thread->Wait.Lock.Default, "Thread Wait Default" );
  _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  _RBTree_Initialize_node( &the_thread->Wait.Link.Registry_node );
#endif
}

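/*
 * Initializes the thread according to the thread configuration.  The
 * function returns false if a simple error condition was detected.  In this
 * case, the thread is already fully initialized, so that the caller can
 * release all resources with _Thread_Free().
 */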
static bool _Thread_Try_initialize(
  Thread_Information         *information,
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  uintptr_t                tls_size;
  size_t                   i;
  char                    *stack_begin;
  char                    *stack_end;
  uintptr_t                stack_align;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

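  /*
   * Zero out the thread control block from the Join_queue member to the end
   * of the thread object, including the add-on areas.  The object management
   * information at the beginning of the thread control block stays intact.
   */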
  memset(
    &the_thread->Join_queue,
    0,
    information->Objects.object_size - offsetof( Thread_Control, Join_queue )
  );

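  /*
   * Let the add-on pointers of the thread control block reference their
   * associated areas within the thread object.
   */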
  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /* Set up the properly aligned stack area begin and end */
  stack_begin = config->stack_area;
  stack_end = stack_begin + config->stack_size;
  stack_align = CPU_STACK_ALIGNMENT;
  stack_end = (char *) RTEMS_ALIGN_DOWN( (uintptr_t) stack_end, stack_align );

  /* Allocate floating-point context in stack area */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( config->is_fp ) {
    stack_end -= CONTEXT_FP_SIZE;
    the_thread->fp_context = (Context_Control_fp *) stack_end;
    the_thread->Start.fp_context = (Context_Control_fp *) stack_end;
  }
#endif

  tls_size = _TLS_Get_allocation_size();

  /* Allocate thread-local storage (TLS) area in stack area */
  if ( tls_size > 0 ) {
    stack_end -= tls_size;
    the_thread->Start.tls_area = stack_end;
  }

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack_begin,
    stack_end - stack_begin
  );

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Pop(
    &information->Thread_queue_heads.Free
  );
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */

  the_thread->is_fp                       = config->is_fp;
  the_thread->Start.isr_level             = config->isr_level;
  the_thread->Start.is_preemptible        = config->is_preemptible;
  the_thread->Start.cpu_budget_operations = config->cpu_budget_operations;
  the_thread->Start.stack_free            = config->stack_free;
  the_thread->Join_queue.Queue.owner      = the_thread;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );
  _Thread_Initialize_scheduler_and_wait_nodes( the_thread, config );

#if defined(RTEMS_SMP)
  _Processor_mask_Assign(
    &the_thread->Scheduler.Affinity,
    _SMP_Get_online_processors()
  );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
  _SMP_lock_Stats_initialize( &the_thread->Join_queue.Lock_stats, "Thread State" );
#endif

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.operations         = &_Thread_queue_Operations_default;
  the_thread->Start.initial_priority  = config->priority;

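  /*
   * The thread wait flags were zero initialized by the memset() above.  The
   * static assertion below ensures that this value corresponds to the ready
   * wait state.
   */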
  RTEMS_STATIC_ASSERT( THREAD_WAIT_STATE_READY == 0, Wait_flags );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Objects_Open_u32( &information->Objects, &the_thread->Object, config->name );

  /*
   * We do the following checks of simple error conditions after the thread is
   * fully initialized to simplify the cleanup in case of an error.  With a
   * fully initialized thread we can simply use _Thread_Free() and do not have
   * to bother with partially initialized threads.
   */

#if defined(RTEMS_SMP)
  if (
    !config->is_preemptible
      && !_Scheduler_Is_non_preempt_mode_supported( config->scheduler )
  ) {
    return false;
  }
#endif

#if defined(RTEMS_SMP) || CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE
  if (
    config->isr_level != 0
#if CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    return false;
  }
#endif

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  return _User_extensions_Thread_create( the_thread );
}

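/*
 * If the thread could not be fully initialized, then the thread object is
 * closed and all resources obtained so far are released through
 * _Thread_Free().
 */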
Status_Control _Thread_Initialize(
  Thread_Information         *information,
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  bool ok;

  ok = _Thread_Try_initialize( information, the_thread, config );

  if ( !ok ) {
    _Objects_Close( &information->Objects, &the_thread->Object );
    _Thread_Free( information, the_thread );

    return STATUS_UNSATISFIED;
  }

  return STATUS_SUCCESSFUL;
}