#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/smpimpl.h>
#include <rtems/score/assert.h>

#define _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, lock_context ) \
  _ISR_lock_ISR_disable_and_acquire( &( cpu )->Jobs.Lock, lock_context )

#define _Per_CPU_Jobs_release_and_ISR_enable( cpu, lock_context ) \
  _ISR_lock_Release_and_ISR_enable( &( cpu )->Jobs.Lock, lock_context )

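/*
 * Drains the job FIFO of the processor: the whole list is detached under
 * the ISR lock and each handler then runs outside the lock.  The next
 * pointer is read before the handler is called, since the waiting
 * processor may reuse the job as soon as it is marked done.
 */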
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
{
  ISR_lock_Context lock_context;
  Per_CPU_Job     *job;

  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
  job = cpu->Jobs.head;
  cpu->Jobs.head = NULL;
  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );

  while ( job != NULL ) {
    const Per_CPU_Job_context *context;
    Per_CPU_Job               *next;

    context = job->context;
    next = job->next;
    ( *context->handler )( context->arg );
    _Atomic_Store_ulong( &job->done, PER_CPU_JOB_DONE, ATOMIC_ORDER_RELEASE );

    job = next;
  }
}

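/*
 * Appends the job to the tail of the processor's job FIFO under the ISR
 * lock.  The done indicator is reset with relaxed order; it is only set
 * with release order once the job has been performed.
 */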
void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
{
  ISR_lock_Context lock_context;

  _Assert( job->context != NULL && job->context->handler != NULL );

  _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
  _Assert( job->next == NULL );

  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );

  if ( cpu->Jobs.head == NULL ) {
    cpu->Jobs.head = job;
  } else {
    *cpu->Jobs.tail = job;
  }

  cpu->Jobs.tail = &job->next;

  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
}

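/*
 * Enqueues the job and sends SMP_MESSAGE_PERFORM_JOBS to the processor,
 * so that it processes the message and performs its pending jobs.
 */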
void _Per_CPU_Submit_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
{
  _Per_CPU_Add_job( cpu, job );
  _SMP_Send_message( cpu, SMP_MESSAGE_PERFORM_JOBS );
}

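/*
 * Busy-waits until the job has been performed.  The acquire load of
 * job->done pairs with the release store in _Per_CPU_Perform_jobs(), so
 * that all side effects of the handler are visible to the caller.
 */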
void _Per_CPU_Wait_for_job(
  const Per_CPU_Control *cpu,
  const Per_CPU_Job     *job
)
{
  while (
    _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
      != PER_CPU_JOB_DONE
  ) {
    Per_CPU_Control *cpu_self;

    switch ( _Per_CPU_Get_state( cpu ) ) {
      case PER_CPU_STATE_INITIAL:
      case PER_CPU_STATE_READY_TO_START_MULTITASKING:
      case PER_CPU_STATE_UP:
        /*
         * Calling this function with the current processor is
         * intentional.  We have to perform our own jobs here in case
         * inter-processor interrupts are not working.
         */
        cpu_self = _Per_CPU_Get();
        _SMP_Try_to_process_message(
          cpu_self,
          _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
        );
        break;
      default:
        _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
    }
  }
}
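
/*
 * A minimal usage sketch, not part of the original file: it shows how a
 * caller might run a handler on another processor and synchronize with
 * its completion using the functions above.  The _Example_* names and
 * the PER_CPU_JOBS_USAGE_EXAMPLE guard are hypothetical; only the
 * Per_CPU_* types and functions come from the code above.
 */
#ifdef PER_CPU_JOBS_USAGE_EXAMPLE
static void _Example_handler( void *arg )
{
  /* Runs on the target processor while it performs its jobs */
  Atomic_Ulong *counter;

  counter = arg;
  _Atomic_Fetch_add_ulong( counter, 1, ATOMIC_ORDER_RELAXED );
}

static void _Example_run_on( Per_CPU_Control *cpu, Atomic_Ulong *counter )
{
  Per_CPU_Job_context context;
  Per_CPU_Job         job;

  context.handler = _Example_handler;
  context.arg = counter;

  job.next = NULL;        /* _Per_CPU_Add_job() asserts this */
  job.context = &context;

  /* Enqueue the job and notify the target processor */
  _Per_CPU_Submit_job( cpu, &job );

  /*
   * The job and its context live in this stack frame, so do not return
   * before the target processor has executed the handler.
   */
  _Per_CPU_Wait_for_job( cpu, &job );
}
#endif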