/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScorePerCPU
 *
 * @brief This source file contains the implementation of _Per_CPU_Add_job(),
 *   _Per_CPU_Perform_jobs(), _Per_CPU_Submit_job(), and
 *   _Per_CPU_Wait_for_job().
 */

/*
 * Copyright (C) 2019 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/smpimpl.h>
#include <rtems/score/assert.h>

#define _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, lock_context ) \
  _ISR_lock_ISR_disable_and_acquire( &( cpu )->Jobs.Lock, lock_context )

#define _Per_CPU_Jobs_release_and_ISR_enable( cpu, lock_context ) \
  _ISR_lock_Release_and_ISR_enable( &( cpu )->Jobs.Lock, lock_context )

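/*
 * The Jobs member of the per-CPU control is used below as a singly linked
 * FIFO list: Jobs.head points to the first pending job and Jobs.tail to the
 * location which receives the next appended job.  Both are protected by
 * Jobs.Lock with interrupts disabled.
 */
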
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
{
  ISR_lock_Context  lock_context;
  Per_CPU_Job      *job;

  /* Detach the whole list of pending jobs while holding the lock */
  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
  job = cpu->Jobs.head;
  cpu->Jobs.head = NULL;
  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );

  /* Carry out the detached jobs without holding the lock */
  while ( job != NULL ) {
    const Per_CPU_Job_context *context;
    Per_CPU_Job               *next;

    /*
     * Fetch the context and the next pointer before the job is marked as
     * done, since the submitter may reuse or free the job afterwards.  The
     * release store pairs with the acquire load in _Per_CPU_Wait_for_job().
     */
    context = job->context;
    next = job->next;
    ( *context->handler )( context->arg );
    _Atomic_Store_ulong( &job->done, PER_CPU_JOB_DONE, ATOMIC_ORDER_RELEASE );

    job = next;
  }
}

void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
{
  ISR_lock_Context lock_context;

  _Assert( job->context != NULL && job->context->handler != NULL );

  _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
  _Assert( job->next == NULL );

  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );

  /* Append the job to the tail of the processor's job list */
  if ( cpu->Jobs.head == NULL ) {
    cpu->Jobs.head = job;
  } else {
    *cpu->Jobs.tail = job;
  }

  cpu->Jobs.tail = &job->next;

  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
}

void _Per_CPU_Submit_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
{
  _Per_CPU_Add_job( cpu, job );
  _SMP_Send_message( cpu, SMP_MESSAGE_PERFORM_JOBS );
}

void _Per_CPU_Wait_for_job(
  const Per_CPU_Control *cpu,
  const Per_CPU_Job     *job
)
{
  /* Busy wait until the job has been carried out by the target processor */
  while (
    _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
      != PER_CPU_JOB_DONE
  ) {
    Per_CPU_Control *cpu_self;

    switch ( _Per_CPU_Get_state( cpu ) ) {
      case PER_CPU_STATE_INITIAL:
      case PER_CPU_STATE_READY_TO_START_MULTITASKING:
      case PER_CPU_STATE_UP:
        /*
         * Calling this function with the current processor is intentional.
         * We have to perform our own jobs here in case inter-processor
         * interrupts are not working.
         */
        cpu_self = _Per_CPU_Get();
        _SMP_Try_to_process_message(
          cpu_self,
          _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
        );
        break;
      default:
        _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
    }
  }
}
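
/*
 * Illustrative sketch, not part of the implementation above: one way a
 * caller might combine _Per_CPU_Submit_job() and _Per_CPU_Wait_for_job() to
 * run a handler on another processor and block until it has completed.  The
 * _Example_* names are hypothetical; Per_CPU_Job, Per_CPU_Job_context, and
 * _Per_CPU_Get_by_index() are declared in <rtems/score/percpu.h>.  The job
 * object must remain valid until the wait returns, so stack storage is
 * sufficient here.
 */
static void _Example_set_flag( void *arg )
{
  Atomic_Ulong *flag;

  flag = arg;
  _Atomic_Store_ulong( flag, 1, ATOMIC_ORDER_RELAXED );
}

static void _Example_run_on( uint32_t cpu_index, Atomic_Ulong *flag )
{
  Per_CPU_Job_context context = { .handler = _Example_set_flag, .arg = flag };
  Per_CPU_Job         job = { .context = &context };
  Per_CPU_Control    *cpu = _Per_CPU_Get_by_index( cpu_index );

  /* Queue the job and notify the target processor via an SMP message */
  _Per_CPU_Submit_job( cpu, &job );

  /* Returns once _Example_set_flag() ran on the target processor */
  _Per_CPU_Wait_for_job( cpu, &job );
}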