Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:13

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScorePerCPU
0007  *
0008  * @brief This header file provides the interfaces of the
0009  *   @ref RTEMSScorePerCPU.
0010  */
0011 
0012 /*
0013  *  COPYRIGHT (c) 1989-2011.
0014  *  On-Line Applications Research Corporation (OAR).
0015  *
0016  *  Copyright (C) 2012, 2018 embedded brains GmbH & Co. KG
0017  *
0018  * Redistribution and use in source and binary forms, with or without
0019  * modification, are permitted provided that the following conditions
0020  * are met:
0021  * 1. Redistributions of source code must retain the above copyright
0022  *    notice, this list of conditions and the following disclaimer.
0023  * 2. Redistributions in binary form must reproduce the above copyright
0024  *    notice, this list of conditions and the following disclaimer in the
0025  *    documentation and/or other materials provided with the distribution.
0026  *
0027  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0028  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0029  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0030  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0031  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0032  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0033  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0034  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0035  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0036  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0037  * POSSIBILITY OF SUCH DAMAGE.
0038  */
0039 
0040 #ifndef _RTEMS_PERCPU_H
0041 #define _RTEMS_PERCPU_H
0042 
0043 #include <rtems/score/cpuimpl.h>
0044 
0045 #if defined( ASM )
0046   #include <rtems/asm.h>
0047 #else
0048   #include <rtems/score/assert.h>
0049   #include <rtems/score/chain.h>
0050   #include <rtems/score/isrlock.h>
0051   #include <rtems/score/smp.h>
0052   #include <rtems/score/timestamp.h>
0053   #include <rtems/score/watchdog.h>
0054 #endif
0055 
0056 #ifdef __cplusplus
0057 extern "C" {
0058 #endif
0059 
0060 #if defined( RTEMS_SMP )
0061   #if defined( RTEMS_PROFILING )
0062     #define PER_CPU_CONTROL_SIZE_PROFILING 332
0063   #else
0064     #define PER_CPU_CONTROL_SIZE_PROFILING 0
0065   #endif
0066 
0067   #if defined( RTEMS_DEBUG )
0068     #define PER_CPU_CONTROL_SIZE_DEBUG 76
0069   #else
0070     #define PER_CPU_CONTROL_SIZE_DEBUG 0
0071   #endif
0072 
0073   #if CPU_SIZEOF_POINTER > 4
0074     #define PER_CPU_CONTROL_SIZE_BIG_POINTER 76
0075   #else
0076     #define PER_CPU_CONTROL_SIZE_BIG_POINTER 0
0077   #endif
0078 
0079   #define PER_CPU_CONTROL_SIZE_BASE 180
0080   #define PER_CPU_CONTROL_SIZE_APPROX \
0081     ( PER_CPU_CONTROL_SIZE_BASE + CPU_PER_CPU_CONTROL_SIZE + \
0082     CPU_INTERRUPT_FRAME_SIZE + PER_CPU_CONTROL_SIZE_PROFILING + \
0083     PER_CPU_CONTROL_SIZE_DEBUG + PER_CPU_CONTROL_SIZE_BIG_POINTER )
0084 
0085   /*
0086    * This ensures that on SMP configurations the individual per-CPU controls
0087    * are on different cache lines to prevent false sharing.  This define can be
0088    * used in assembler code to easily get the per-CPU control for a particular
0089    * processor.
0090    */
0091   #if PER_CPU_CONTROL_SIZE_APPROX > 1024
0092     #define PER_CPU_CONTROL_SIZE_LOG2 11
0093   #elif PER_CPU_CONTROL_SIZE_APPROX > 512
0094     #define PER_CPU_CONTROL_SIZE_LOG2 10
0095   #elif PER_CPU_CONTROL_SIZE_APPROX > 256
0096     #define PER_CPU_CONTROL_SIZE_LOG2 9
0097   #elif PER_CPU_CONTROL_SIZE_APPROX > 128
0098     #define PER_CPU_CONTROL_SIZE_LOG2 8
0099   #else
0100     #define PER_CPU_CONTROL_SIZE_LOG2 7
0101   #endif
0102 
0103   #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
0104 #endif
0105 
0106 #if !defined( ASM )
0107 
0108 struct Record_Control;
0109 
0110 struct _Thread_Control;
0111 
0112 struct Scheduler_Context;
0113 
0114 /**
0115  * @defgroup RTEMSScorePerCPU Per-CPU Information
0116  *
0117  * @ingroup RTEMSScore
0118  *
0119  * @brief This group contains the implementation of the per-CPU information.
0120  *
0121  * The per-CPU information encapsulates state which is maintained for each
0122  * configured processor in the system.  There will be one instance of a
0123  * ::Per_CPU_Control in the ::_Per_CPU_Information table for each configured
0124  * processor in the system.
0125  */
0126 
0127 /**@{*/
0128 
0129 #if defined( RTEMS_SMP )
0130 
0131 /**
0132  * @brief State of a processor.
0133  *
0134  * The processor state controls the life cycle of processors at the lowest
0135  * level.  No multi-threading or other high-level concepts matter here.
0136  *
0137  * The state of a processor is indicated by the Per_CPU_Control::state member.
0138  * The current state of a processor can be obtained by _Per_CPU_Get_state().  Only
0139  * the processor associated with the control may change its state using
0140  * _Per_CPU_Set_state().
0141  *
0142  * Due to the sequential nature of the basic system initialization one
0143  * processor has a special role.  It is the processor executing the boot_card()
0144  * function.  This processor is called the boot processor.  All other
0145  * processors are called secondary.  The boot processor uses
0146  * _SMP_Request_start_multitasking() to indicate that processors should start
0147  * multiprocessing.  Secondary processors will wait for this request in
0148  * _SMP_Start_multitasking_on_secondary_processor().
0149  *
0150  * @dot
0151  * digraph states {
0152  *   i [label="PER_CPU_STATE_INITIAL"];
0153  *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
0154  *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
0155  *   u [label="PER_CPU_STATE_UP"];
0156  *   s [label="PER_CPU_STATE_SHUTDOWN"];
0157  *   i -> rdy [label="processor\ncompleted initialization"];
0158  *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
0159  *   reqsm -> u [label="processor\nstarts multitasking"];
0160  *   i -> s;
0161  *   rdy -> s;
0162  *   reqsm -> s;
0163  *   u -> s;
0164  * }
0165  * @enddot
0166  */
0167 typedef enum {
0168   /**
0169    * @brief The per CPU controls are initialized to zero.
0170    *
0171    * The boot processor executes the sequential boot code in this state.  The
0172    * secondary processors should perform their basic initialization now and
0173    * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
0174    * is complete.
0175    */
0176   PER_CPU_STATE_INITIAL,
0177 
0178   /**
0179    * @brief Processor is ready to start multitasking.
0180    *
0181    * The secondary processor performed its basic initialization and is ready to
0182    * receive inter-processor interrupts.  Interrupt delivery must be disabled
0183    * in this state, but requested inter-processor interrupts must be recorded
0184    * and must be delivered once the secondary processor enables interrupts for
0185    * the first time.  The boot processor will wait for all secondary processors
0186    * to change into this state.  In case a secondary processor does not reach
0187    * this state the system will not start.  The secondary processors wait now
0188    * for a change into the PER_CPU_STATE_UP state set requested by the boot
0189    * processor through ::_SMP_Ready_to_start_multitasking once all secondary
0190    * processors reached the PER_CPU_STATE_READY_TO_START_MULTITASKING state.
0191    */
0192   PER_CPU_STATE_READY_TO_START_MULTITASKING,
0193 
0194   /**
0195    * @brief Normal multitasking state.
0196    */
0197   PER_CPU_STATE_UP,
0198 
0199   /**
0200    * @brief This is the terminal state.
0201    */
0202   PER_CPU_STATE_SHUTDOWN
0203 } Per_CPU_State;
0204 
0205 typedef void ( *Per_CPU_Job_handler )( void *arg );
0206 
0207 /**
0208  * @brief Context for per-processor jobs.
0209  *
0210  * This is separate from Per_CPU_Job to save stack memory in
0211  * _SMP_Multicast_action().
0212  */
0213 typedef struct {
0214   /**
0215    * @brief The job handler.
0216    */
0217   Per_CPU_Job_handler handler;
0218 
0219   /**
0220    * @brief The job handler argument.
0221    */
0222   void *arg;
0223 } Per_CPU_Job_context;
0224 
0225 /*
0226  * Value for the Per_CPU_Job::done member to indicate that a job is done
0227  * (handler was called on the target processor).  Must not be a valid pointer
0228  * value since it overlaps with the Per_CPU_Job::next member.
0229  */
0230 #define PER_CPU_JOB_DONE 1
0231 
0232 /**
0233  * @brief A per-processor job.
0234  *
0235  * This structure must be as small as possible due to stack space constraints
0236  * in _SMP_Multicast_action().
0237  */
0238 typedef struct Per_CPU_Job {
0239   union {
0240     /**
0241      * @brief The next job in the corresponding per-processor job list.
0242      */
0243     struct Per_CPU_Job *next;
0244 
0245     /**
0246      * @brief Indication if the job is done.
0247      *
0248      * A job is done if this member has the value PER_CPU_JOB_DONE.  This
0249      * assumes that PER_CPU_JOB_DONE is not a valid pointer value.
0250      */
0251     Atomic_Ulong done;
0252   };
0253 
0254   /**
0255    * @brief Pointer to the job context to get the handler and argument.
0256    */
0257   const Per_CPU_Job_context *context;
0258 } Per_CPU_Job;
0259 
0260 #endif /* defined( RTEMS_SMP ) */
0261 
0262 /**
0263  * @brief Per-CPU statistics.
0264  */
0265 
0266 /*
0267  * This was added to address the following warning:
0268  * warning: struct has no members
0269  */
0270 #pragma GCC diagnostic push
0271 #pragma GCC diagnostic ignored "-Wpedantic"
0272 typedef struct {
0273 #if defined( RTEMS_PROFILING )
0274   /**
0275    * @brief The thread dispatch disabled begin instant in CPU counter ticks.
0276    *
0277    * This value is used to measure the time of disabled thread dispatching.
0278    */
0279   CPU_Counter_ticks thread_dispatch_disabled_instant;
0280 
0281   /**
0282    * @brief The maximum time of disabled thread dispatching in CPU counter
0283    * ticks.
0284    */
0285   CPU_Counter_ticks max_thread_dispatch_disabled_time;
0286 
0287   /**
0288    * @brief The maximum time spent to process a single sequence of nested
0289    * interrupts in CPU counter ticks.
0290    *
0291    * This is the time interval between the change of the interrupt nest level
0292    * from zero to one and the change back from one to zero.
0293    */
0294   CPU_Counter_ticks max_interrupt_time;
0295 
0296   /**
0297    * @brief The maximum interrupt delay in CPU counter ticks if supported by
0298    * the hardware.
0299    */
0300   CPU_Counter_ticks max_interrupt_delay;
0301 
0302   /**
0303    * @brief Count of times when the thread dispatch disable level changes from
0304    * zero to one in thread context.
0305    *
0306    * This value may overflow.
0307    */
0308   uint64_t thread_dispatch_disabled_count;
0309 
0310   /**
0311    * @brief Total time of disabled thread dispatching in CPU counter ticks.
0312    *
0313    * The average time of disabled thread dispatching is the total time of
0314    * disabled thread dispatching divided by the thread dispatch disabled
0315    * count.
0316    *
0317    * This value may overflow.
0318    */
0319   uint64_t total_thread_dispatch_disabled_time;
0320 
0321   /**
0322    * @brief Count of times when the interrupt nest level changes from zero to
0323    * one.
0324    *
0325    * This value may overflow.
0326    */
0327   uint64_t interrupt_count;
0328 
0329   /**
0330    * @brief Total time of interrupt processing in CPU counter ticks.
0331    *
0332    * The average time of interrupt processing is the total time of interrupt
0333    * processing divided by the interrupt count.
0334    *
0335    * This value may overflow.
0336    */
0337   uint64_t total_interrupt_time;
0338 #endif /* defined( RTEMS_PROFILING ) */
0339 } Per_CPU_Stats;
0340 #pragma GCC diagnostic pop
0341 
0342 /**
0343  * @brief Per-CPU watchdog header index.
0344  */
0345 typedef enum {
0346   /**
0347    * @brief Index for tick clock per-CPU watchdog header.
0348    *
0349    * The reference time point for the tick clock is the system start.  The
0350    * clock resolution is one system clock tick.  It is used for the system
0351    * clock tick based time services.
0352    */
0353   PER_CPU_WATCHDOG_TICKS,
0354 
0355   /**
0356    * @brief Index for realtime clock per-CPU watchdog header.
0357    *
0358    * The reference time point for the realtime clock is the POSIX Epoch.  The
0359    * clock resolution is one nanosecond.  It is used for the time of day
0360    * services and the POSIX services using CLOCK_REALTIME.
0361    */
0362   PER_CPU_WATCHDOG_REALTIME,
0363 
0364   /**
0365    * @brief Index for monotonic clock per-CPU watchdog header.
0366    *
0367    * The reference time point for the monotonic clock is the system start.  The
0368    * clock resolution is one nanosecond.  It is used for the POSIX services
0369    * using CLOCK_MONOTONIC.
0370    */
0371   PER_CPU_WATCHDOG_MONOTONIC,
0372 
0373   /**
0374    * @brief Count of per-CPU watchdog headers.
0375    */
0376   PER_CPU_WATCHDOG_COUNT
0377 } Per_CPU_Watchdog_index;
0378 
0379 /**
0380  *  @brief Per CPU Core Structure
0381  *
0382  *  This structure is used to hold per core state information.
0383  */
0384 typedef struct Per_CPU_Control {
0385   #if CPU_PER_CPU_CONTROL_SIZE > 0
0386     /**
0387      * @brief CPU port specific control.
0388      */
0389     CPU_Per_CPU_control cpu_per_cpu;
0390   #endif
0391 
0392   /**
0393    * @brief The interrupt stack low address for this processor.
0394    */
0395   void *interrupt_stack_low;
0396 
0397   /**
0398    * @brief The interrupt stack high address for this processor.
0399    */
0400   void *interrupt_stack_high;
0401 
0402   /**
0403    *  This contains the current interrupt nesting level on this
0404    *  CPU.
0405    */
0406   uint32_t isr_nest_level;
0407 
0408   /**
0409    * @brief Indicates if an ISR thread dispatch is disabled.
0410    *
0411    * This flag is context switched with each thread.  It indicates that this
0412    * thread has an interrupt stack frame on its stack.  By using this flag, we
0413    * can avoid nesting more interrupt dispatching attempts on a previously
0414    * interrupted thread's stack.
0415    */
0416   uint32_t isr_dispatch_disable;
0417 
0418   /**
0419    * @brief The thread dispatch critical section nesting counter which is used
0420    * to prevent context switches at inopportune moments.
0421    */
0422   volatile uint32_t thread_dispatch_disable_level;
0423 
0424   /**
0425    * @brief This is set to true when this processor needs to run the thread
0426    * dispatcher.
0427    *
0428    * It is volatile since interrupts may alter this flag.
0429    *
0430    * This member is not protected by a lock and must be accessed only by this
0431    * processor.  Code (e.g. scheduler and post-switch action requests) running
0432    * on another processors must use an inter-processor interrupt to set the
0433    * thread dispatch necessary indicator to true.
0434    *
0435    * @see _Thread_Get_heir_and_make_it_executing().
0436    */
0437   volatile bool dispatch_necessary;
0438 
0439   /*
0440    * Ensure that the executing member is at least 4-byte aligned, see
0441    * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
0442    * alignment restrictions, e.g. type alignment is less than the type size.
0443    */
0444   bool reserved_for_executing_alignment[ 3 ];
0445 
0446   /**
0447    * @brief This is the thread executing on this processor.
0448    *
0449    * This member is not protected by a lock.  The only writer is this
0450    * processor.
0451    *
0452    * On SMP configurations a thread may be registered as executing on more than
0453    * one processor in case a thread migration is in progress.  On SMP
0454    * configurations use _Thread_Is_executing_on_a_processor() to figure out if
0455    * a thread context is executing on a processor.
0456    */
0457   struct _Thread_Control *executing;
0458 
0459   /**
0460    * @brief This is the heir thread for this processor.
0461    *
0462    * This member is not protected by a lock.  The only writer after
0463    * multitasking start is the scheduler owning this processor.  It is assumed
0464    * that stores to pointers are atomic on all supported SMP architectures.
0465    * The CPU port specific code (inter-processor interrupt handling and
0466    * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
0467    * last value written.
0468    *
0469    * A thread can be a heir on at most one processor in the system.
0470    *
0471    * @see _Thread_Get_heir_and_make_it_executing().
0472    */
0473   struct _Thread_Control *heir;
0474 
0475 #if defined(RTEMS_SMP)
0476   CPU_Interrupt_frame Interrupt_frame;
0477 #endif
0478 
0479   /**
0480    * @brief The CPU usage timestamp contains the time point of the last heir
0481    * thread change or last CPU usage update of the executing thread of this
0482    * processor.
0483    *
0484    * Protected by the scheduler lock.
0485    *
0486    * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
0487    * _Thread_Get_CPU_time_used().
0488    */
0489   Timestamp_Control cpu_usage_timestamp;
0490 
0491   /**
0492    * @brief Watchdog state for this processor.
0493    */
0494   struct {
0495 #if defined(RTEMS_SMP)
0496     /**
0497      * @brief Protects all watchdog operations on this processor.
0498      */
0499     ISR_lock_Control Lock;
0500 #endif
0501 
0502     /**
0503      * @brief Watchdog ticks on this processor used for monotonic clock
0504      * watchdogs.
0505      */
0506     uint64_t ticks;
0507 
0508     /**
0509      * @brief Header for watchdogs.
0510      *
0511      * @see Per_CPU_Watchdog_index.
0512      */
0513     Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
0514   } Watchdog;
0515 
0516   #if defined( RTEMS_SMP )
0517     /**
0518      * @brief This lock protects some members of this structure.
0519      */
0520     ISR_lock_Control Lock;
0521 
0522     /**
0523      * @brief Lock context used to acquire all per-CPU locks.
0524      *
0525      * This member is protected by the Per_CPU_Control::Lock lock.
0526      *
0527      * @see _Per_CPU_Acquire_all().
0528      */
0529     ISR_lock_Context Lock_context;
0530 
0531     /**
0532      * @brief Chain of threads in need for help.
0533      *
0534      * This member is protected by the Per_CPU_Control::Lock lock.
0535      */
0536     Chain_Control Threads_in_need_for_help;
0537 
0538     /**
0539      * @brief Bit field for SMP messages.
0540      *
0541      * This member is not protected by locks.  Atomic operations are used to set
0542      * and get the message bits.
0543      */
0544     Atomic_Ulong message;
0545 
0546     struct {
0547       /**
0548        * @brief The scheduler control of the scheduler owning this processor.
0549        *
0550        * This pointer is NULL in case this processor is currently not used by a
0551        * scheduler instance.
0552        */
0553       const struct _Scheduler_Control *control;
0554 
0555       /**
0556        * @brief The scheduler context of the scheduler owning this processor.
0557        *
0558        * This pointer is NULL in case this processor is currently not used by a
0559        * scheduler instance.
0560        */
0561       const struct Scheduler_Context *context;
0562 
0563       /**
0564        * @brief The idle thread for this processor in case it is online and
0565        * currently not used by a scheduler instance.
0566        */
0567       struct _Thread_Control *idle_if_online_and_unused;
0568     } Scheduler;
0569 
0570     /**
0571      * @brief The ancestor of the executing thread.
0572      *
0573      * This member is used by _User_extensions_Thread_switch().
0574      */
0575     struct _Thread_Control *ancestor;
0576 
0577     /**
0578      * @brief Begin of the per-CPU data area.
0579      *
0580      * Contains items defined via PER_CPU_DATA_ITEM().
0581      */
0582     char *data;
0583 
0584     /**
0585      * @brief Indicates the current state of the processor.
0586      *
0587      * Only the processor associated with this control is allowed to change
0588      * this member.
0589      *
0590      * @see _Per_CPU_Get_state() and _Per_CPU_Set_state().
0591      */
0592     Atomic_Uint state;
0593 
0594     /**
0595      * @brief FIFO list of jobs to be performed by this processor.
0596      *
0597      * @see _SMP_Multicast_action().
0598      */
0599     struct {
0600       /**
0601        * @brief Lock to protect the FIFO list of jobs to be performed by this
0602        * processor.
0603        */
0604       ISR_lock_Control Lock;
0605 
0606       /**
0607        * @brief Head of the FIFO list of jobs to be performed by this
0608        * processor.
0609        *
0610        * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
0611        */
0612       struct Per_CPU_Job *head;
0613 
0614       /**
0615        * @brief Tail of the FIFO list of jobs to be performed by this
0616        * processor.
0617        *
0618        * This member is only valid if the head is not @c NULL.
0619        *
0620        * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
0621        */
0622       struct Per_CPU_Job **tail;
0623     } Jobs;
0624 
0625     /**
0626      * @brief Indicates if the processor has been successfully started via
0627      * _CPU_SMP_Start_processor().
0628      */
0629     bool online;
0630 
0631     /**
0632      * @brief Indicates if the processor is the one that performed the initial
0633      * system initialization.
0634      */
0635     bool boot;
0636   #endif
0637 
0638   struct Record_Control *record;
0639 
0640   Per_CPU_Stats Stats;
0641 } Per_CPU_Control;
0642 
#if defined( RTEMS_SMP )
/**
 * @brief Envelope which pads the per-CPU control up to PER_CPU_CONTROL_SIZE.
 *
 * The padding places each per-CPU control in its own set of cache lines to
 * prevent false sharing between processors, see PER_CPU_CONTROL_SIZE_LOG2.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief Envelope for the per-CPU control on uniprocessor configurations.
 *
 * No padding is required since there is only one processor.
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
0654 
0655 /**
0656  *  @brief Set of Per CPU Core Information
0657  *
0658  *  This is an array of per CPU core information.
0659  */
0660 extern CPU_STRUCTURE_ALIGNMENT Per_CPU_Control_envelope _Per_CPU_Information[];
0661 
0662 #define _Per_CPU_Acquire( cpu, lock_context ) \
0663   _ISR_lock_Acquire( &( cpu )->Lock, lock_context )
0664 
0665 #define _Per_CPU_Release( cpu, lock_context ) \
0666   _ISR_lock_Release( &( cpu )->Lock, lock_context )
0667 
0668 /*
0669  * If we get the current processor index in a context which allows thread
0670  * dispatching, then we may already run on another processor right after the
0671  * read instruction.  There are very few cases in which this makes sense (here
0672  * we can use _Per_CPU_Get_snapshot()).  All other places must use
0673  * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
0674  */
0675 #if defined( _CPU_Get_current_per_CPU_control )
0676   #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
0677 #else
0678   #define _Per_CPU_Get_snapshot() \
0679     ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
0680 #endif
0681 
#if defined( RTEMS_SMP )
/*
 * Gets the per-CPU control of the current processor.  On SMP configurations
 * the caller must be in a context which prevents migration to another
 * processor (thread dispatching disabled or interrupts disabled), otherwise
 * the returned control could become stale immediately, see also the comment
 * above _Per_CPU_Get_snapshot().
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  /* In RTEMS_DEBUG builds, check that the caller cannot migrate */
  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
/* On uniprocessor configurations the snapshot cannot become stale */
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
0696 
0697 static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
0698 {
0699   return &_Per_CPU_Information[ index ].per_cpu;
0700 }
0701 
/**
 * @brief Gets the index of the processor associated with the per-CPU control.
 *
 * @param cpu is the per-CPU control.
 *
 * @return Returns the processor index.
 */
static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
{
#if defined(RTEMS_SMP)
  /*
   * The per-CPU control is the first member of the envelope, so the control
   * address is also the envelope address.  The index is the element distance
   * from the start of the _Per_CPU_Information table.
   */
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
#else
  /* There is exactly one processor on uniprocessor configurations */
  (void) cpu;
  return 0;
#endif
}
0714 
0715 static inline struct _Thread_Control *_Per_CPU_Get_executing(
0716   const Per_CPU_Control *cpu
0717 )
0718 {
0719   return cpu->executing;
0720 }
0721 
/**
 * @brief Indicates if ISR processing is in progress on the processor.
 *
 * @param cpu is the per-CPU control.
 *
 * @return Returns true, if ISR processing is in progress on the processor,
 *   otherwise false.
 */
static inline bool _Per_CPU_Is_ISR_in_progress( const Per_CPU_Control *cpu )
{
#if CPU_PROVIDES_ISR_IS_IN_PROGRESS == TRUE
  /* The CPU port provides a dedicated way to query the ISR state */
  (void) cpu;
  return _ISR_Is_in_progress();
#else
  /* Otherwise, a non-zero interrupt nest level indicates ISR processing */
  return cpu->isr_nest_level != 0;
#endif
}
0731 
/**
 * @brief Indicates if the processor has been successfully started.
 *
 * @param cpu is the per-CPU control.
 *
 * @return Returns true, if the processor is online, otherwise false, see
 *   Per_CPU_Control::online.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  /* The one and only processor is always online */
  (void) cpu;

  return true;
#endif
}
0744 
/**
 * @brief Indicates if the processor is the boot processor.
 *
 * @param cpu is the per-CPU control.
 *
 * @return Returns true, if the processor performed the initial system
 *   initialization, otherwise false, see Per_CPU_Control::boot.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  /* The one and only processor performed the system initialization */
  (void) cpu;

  return true;
#endif
}
0757 
/**
 * @brief Disables interrupts and acquires all per-CPU locks.
 *
 * The locks are acquired in processor index order so that concurrent callers
 * cannot deadlock, see also _Per_CPU_Release_all().
 *
 * @param[out] lock_context is the lock context used for the interrupt
 *   disable level and the first per-CPU lock.
 */
static inline void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  /*
   * Each subsequent lock is acquired using the lock context embedded in the
   * previously locked per-CPU control, see Per_CPU_Control::Lock_context.
   */
  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
     Per_CPU_Control *cpu;

     cpu = _Per_CPU_Get_by_index( cpu_index );
     _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
     previous_cpu = cpu;
  }
#else
  /* On uniprocessor configurations, disabling interrupts is sufficient */
  _ISR_lock_ISR_disable( lock_context );
#endif
}
0784 
/**
 * @brief Releases all per-CPU locks and enables interrupts.
 *
 * This is the counterpart of _Per_CPU_Acquire_all().  The locks are released
 * in reverse processor index order.
 *
 * @param[in, out] lock_context is the lock context used for the matching
 *   _Per_CPU_Acquire_all().
 */
static inline void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  /*
   * Release each lock with the lock context stored in the per-CPU control of
   * the previously acquired processor, mirroring _Per_CPU_Acquire_all().
   */
  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
     Per_CPU_Control *previous_cpu;

     previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
     _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
     cpu = previous_cpu;
  }

  /* The first per-CPU lock was acquired with the caller's lock context */
  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  /* On uniprocessor configurations, enabling interrupts is sufficient */
  _ISR_lock_ISR_enable( lock_context );
#endif
}
0811 
0812 #if defined( RTEMS_SMP )
0813 
0814 /**
0815  * @brief Gets the current processor state.
0816  *
0817  * @param cpu is the processor control.
0818  *
0819  * @return Returns the current state of the processor.
0820  */
0821 static inline Per_CPU_State _Per_CPU_Get_state( const Per_CPU_Control *cpu )
0822 {
0823   return (Per_CPU_State)
0824     _Atomic_Load_uint( &cpu->state, ATOMIC_ORDER_ACQUIRE );
0825 }
0826 
0827 /**
0828  * @brief Sets the processor state of the current processor.
0829  *
0830  * @param cpu_self is the processor control of the processor executing this
0831  *   function.
0832  *
0833  * @param state is the new processor state.
0834  */
0835 static inline void _Per_CPU_Set_state(
0836   Per_CPU_Control *cpu_self,
0837   Per_CPU_State    state
0838 )
0839 {
0840   _Assert( cpu_self == _Per_CPU_Get() );
0841   _Atomic_Store_uint(
0842     &cpu_self->state,
0843     (unsigned int) state,
0844     ATOMIC_ORDER_RELEASE
0845   );
0846 }
0847 
0848 /**
0849  * @brief Performs the jobs of the specified processor in FIFO order.
0850  *
0851  * @param[in, out] cpu The jobs of this processor will be performed.
0852  */
0853 void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
0854 
0855 /**
0856  * @brief Adds the job to the tail of the processing list of the processor.
0857  *
0858  * This function does not send the ::SMP_MESSAGE_PERFORM_JOBS message to the
0859  * processor, see also _Per_CPU_Submit_job().
0860  *
0861  * @param[in, out] cpu The processor to add the job.
0862  * @param[in, out] job The job.  The Per_CPU_Job::context member must be
0863  *   initialized by the caller.
0864  */
0865 void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
0866 
0867 /**
0868  * @brief Adds the job to the tail of the processing list of the processor and
0869  *   notifies the processor to process the job.
0870  *
0871  * This function sends the ::SMP_MESSAGE_PERFORM_JOBS message to the processor
0872  * if it is in the ::PER_CPU_STATE_UP state, see also _Per_CPU_Add_job().
0873  *
0874  * @param[in, out] cpu The processor to add the job.
0875  * @param[in, out] job The job.  The Per_CPU_Job::context member must be
0876  *   initialized by the caller.
0877  */
0878 void _Per_CPU_Submit_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
0879 
0880 /**
0881  * @brief Waits for the job carried out by the specified processor.
0882  *
0883  * This function may result in an SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS
0884  * fatal error.
0885  *
0886  * @param[in] cpu The processor carrying out the job.
0887  * @param[in] job The job to wait for.
0888  */
0889 void _Per_CPU_Wait_for_job(
0890   const Per_CPU_Control *cpu,
0891   const Per_CPU_Job     *job
0892 );
0893 
0894 #endif /* defined( RTEMS_SMP ) */
0895 
0896 /*
0897  * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
0898  * Thus when built for non-SMP, there should be no performance penalty.
0899  */
0900 #define _Thread_Dispatch_disable_level \
0901   _Per_CPU_Get()->thread_dispatch_disable_level
0902 #define _Thread_Heir \
0903   _Per_CPU_Get()->heir
0904 
0905 #if defined(_CPU_Get_thread_executing)
0906 #define _Thread_Executing \
0907   _CPU_Get_thread_executing()
0908 #else
0909 #define _Thread_Executing \
0910   _Per_CPU_Get_executing( _Per_CPU_Get() )
0911 #endif
0912 
0913 #define _ISR_Nest_level \
0914   _Per_CPU_Get()->isr_nest_level
0915 #define _CPU_Interrupt_stack_low \
0916   _Per_CPU_Get()->interrupt_stack_low
0917 #define _CPU_Interrupt_stack_high \
0918   _Per_CPU_Get()->interrupt_stack_high
0919 #define _Thread_Dispatch_necessary \
0920   _Per_CPU_Get()->dispatch_necessary
0921 
0922 /**
0923  * @brief Returns the thread control block of the executing thread.
0924  *
0925  * This function can be called in any thread context.  On SMP configurations,
0926  * interrupts are disabled to ensure that the processor index is used
0927  * consistently if no CPU port specific method is available to get the
0928  * executing thread.
0929  *
0930  * @return The thread control block of the executing thread.
0931  */
static inline struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    /*
     * Disable interrupts so that the processor index used by the
     * _Thread_Executing macro stays consistent while the executing thread is
     * read, see the function description above.
     */
    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
0950 
0951 /**@}*/
0952 
0953 #endif /* !defined( ASM ) */
0954 
0955 #if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )
0956 
0957 #define PER_CPU_INTERRUPT_STACK_LOW \
0958   CPU_PER_CPU_CONTROL_SIZE
0959 #define PER_CPU_INTERRUPT_STACK_HIGH \
0960   PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
0961 
0962 #define INTERRUPT_STACK_LOW \
0963   (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
0964 #define INTERRUPT_STACK_HIGH \
0965   (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
0966 
0967 /*
0968  *  These are the offsets of the required elements in the per CPU table.
0969  */
0970 #define PER_CPU_ISR_NEST_LEVEL \
0971   PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
0972 #define PER_CPU_ISR_DISPATCH_DISABLE \
0973   PER_CPU_ISR_NEST_LEVEL + 4
0974 #define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
0975   PER_CPU_ISR_DISPATCH_DISABLE + 4
0976 #define PER_CPU_DISPATCH_NEEDED \
0977   PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
0978 #define PER_CPU_OFFSET_EXECUTING \
0979   PER_CPU_DISPATCH_NEEDED + 4
0980 #define PER_CPU_OFFSET_HEIR \
0981   PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
0982 #if defined(RTEMS_SMP)
0983 #define PER_CPU_INTERRUPT_FRAME_AREA \
0984   PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
0985 #endif
0986 
0987 #define THREAD_DISPATCH_DISABLE_LEVEL \
0988   (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
0989 #define ISR_NEST_LEVEL \
0990   (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
0991 #define DISPATCH_NEEDED \
0992   (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
0993 
0994 #endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
0995 
0996 #ifdef __cplusplus
0997 }
0998 #endif
0999 
1000 #endif
1001 /* end of include file */