Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:43

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /*
0004  * Copyright (C) 2014, 2024 embedded brains GmbH & Co. KG
0005  *
0006  * Redistribution and use in source and binary forms, with or without
0007  * modification, are permitted provided that the following conditions
0008  * are met:
0009  * 1. Redistributions of source code must retain the above copyright
0010  *    notice, this list of conditions and the following disclaimer.
0011  * 2. Redistributions in binary form must reproduce the above copyright
0012  *    notice, this list of conditions and the following disclaimer in the
0013  *    documentation and/or other materials provided with the distribution.
0014  *
0015  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0016  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0017  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0018  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0019  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0020  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0021  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0022  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0023  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0024  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0025  * POSSIBILITY OF SUCH DAMAGE.
0026  */
0027 
0028 #ifdef HAVE_CONFIG_H
0029 #include "config.h"
0030 #endif
0031 
0032 #include <rtems/score/smpimpl.h>
0033 #include <rtems/score/smpbarrier.h>
0034 #include <rtems.h>
0035 
0036 #include <stdio.h>
0037 
0038 #include "tmacros.h"
0039 
/* Test name reported by the test framework banner (TEST_BEGIN/TEST_END) */
const char rtems_test_name[] = "SMPIPI 1";

/* Upper bound of processors supported by this test (see CONFIGURE_MAXIMUM_PROCESSORS) */
#define CPU_COUNT 32
0043 
/*
 * Per-processor IPI job counter.  The trailing padding keeps consecutive
 * counters apart in memory (32 * 4 = 128 bytes per element) — presumably so
 * that counters of different processors never share a cache line and thus
 * avoid false sharing; TODO confirm the target cache line size.
 */
typedef struct {
  uint32_t value;
  uint32_t cache_line_separation[31];
} test_counter;
0048 
/* Shared state of the whole test, used by the main processor and all workers */
typedef struct {
  test_counter counters[CPU_COUNT];       /* per-CPU job execution counters */
  uint32_t copy_counters[CPU_COUNT];      /* snapshots taken by the flood test */
  SMP_barrier_Control barrier;            /* two-party rendezvous control */
  SMP_barrier_State main_barrier_state;   /* barrier state used by the main side */
  SMP_barrier_State worker_barrier_state; /* barrier state used by the worker side */
  Per_CPU_Job jobs[CPU_COUNT][2];         /* two reusable jobs per processor */
  Per_CPU_Job sync_jobs[2];               /* jobs used by wait_for_ipi_done() */
} test_context;
0058 
/* The single test context instance; counters and jobs are zero-initialized */
static test_context test_instance = {
  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
  .main_barrier_state = SMP_BARRIER_STATE_INITIALIZER,
  .worker_barrier_state = SMP_BARRIER_STATE_INITIALIZER
};
0064 
0065 static void barrier(
0066   test_context *ctx,
0067   SMP_barrier_State *state
0068 )
0069 {
0070   _SMP_barrier_Wait(&ctx->barrier, state, 2);
0071 }
0072 
0073 static void barrier_1_handler(void *arg)
0074 {
0075   test_context *ctx = arg;
0076   uint32_t cpu_index_self = _SMP_Get_current_processor();
0077   SMP_barrier_State *bs = &ctx->worker_barrier_state;
0078 
0079   ++ctx->counters[cpu_index_self].value;
0080 
0081   /* (D) */
0082   barrier(ctx, bs);
0083 }
0084 
/* Job context binding barrier_1_handler() to the shared test instance */
static const Per_CPU_Job_context barrier_1_job_context = {
  .handler = barrier_1_handler,
  .arg = &test_instance
};
0089 
0090 static void barrier_0_handler(void *arg)
0091 {
0092   test_context *ctx = arg;
0093   uint32_t cpu_index_self = _SMP_Get_current_processor();
0094   SMP_barrier_State *bs = &ctx->worker_barrier_state;
0095 
0096   ++ctx->counters[cpu_index_self].value;
0097 
0098   /* (A) */
0099   barrier(ctx, bs);
0100 
0101   /* (B) */
0102   barrier(ctx, bs);
0103 
0104   /* (C) */
0105   barrier(ctx, bs);
0106 
0107   ctx->jobs[0][1].context = &barrier_1_job_context;
0108   _Per_CPU_Add_job(_Per_CPU_Get(), &ctx->jobs[0][1]);
0109 }
0110 
/* Job context binding barrier_0_handler() to the shared test instance */
static const Per_CPU_Job_context barrier_0_job_context = {
  .handler = barrier_0_handler,
  .arg = &test_instance
};
0115 
/*
 * Pins the executing task to processor cpu_index_self and, for every other
 * processor, verifies that a SMP_MESSAGE_PERFORM_JOBS message sent while the
 * target is still busy in a job handler is serviced afterwards: the second
 * job (enqueued by barrier_0_handler() on the target itself) must not run
 * before the message sent between (A) and (B) takes effect.  The rendezvous
 * letters (A)-(D) pair up with the identically labelled barrier() calls in
 * barrier_0_handler() and barrier_1_handler().
 */
static void test_send_message_while_processing_a_message(
  test_context *ctx,
  uint32_t cpu_index_self,
  uint32_t cpu_count
)
{
  SMP_barrier_State *bs = &ctx->main_barrier_state;
  uint32_t cpu_index;
  rtems_status_code sc;
  cpu_set_t cpuset;

  /* Bind this task to cpu_index_self so the "main" side is a known processor */
  rtems_test_assert(cpu_index_self < CPU_SETSIZE);
  CPU_ZERO(&cpuset);
  CPU_SET((int) cpu_index_self, &cpuset);
  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    if (cpu_index != cpu_index_self) {
      Per_CPU_Control *cpu_self;

      /* Submit the first job; this also makes the message pending on the target */
      ctx->jobs[0][0].context = &barrier_0_job_context;
      _Per_CPU_Submit_job(_Per_CPU_Get_by_index(cpu_index), &ctx->jobs[0][0]);

      /* (A) */
      barrier(ctx, bs);

      /* Worker is now inside barrier_0_handler(); counter incremented once */
      rtems_test_assert(ctx->counters[cpu_index].value == 1);
      _SMP_Send_message(
        _Per_CPU_Get_by_index(cpu_index),
        SMP_MESSAGE_PERFORM_JOBS
      );

      /* (B) */
      barrier(ctx, bs);

      /* The message must not have run a second job yet */
      rtems_test_assert(ctx->counters[cpu_index].value == 1);

      /* (C) */
      barrier(ctx, bs);

      /* (D) */
      barrier(ctx, bs);

      /* barrier_1_handler() has now executed exactly once on the target */
      rtems_test_assert(ctx->counters[cpu_index].value == 2);

      ctx->counters[cpu_index].value = 0;

      /* Ensure that the second job is done and can be reused */
      cpu_self = _Thread_Dispatch_disable();
      _Per_CPU_Wait_for_job(_Per_CPU_Get_by_index(cpu_index), &ctx->jobs[0][1]);
      _Thread_Dispatch_enable(cpu_self);
    }
  }
}
0171 
0172 static void counter_handler(void *arg, size_t next_job)
0173 {
0174   test_context *ctx = arg;
0175   Per_CPU_Control *cpu_self = _Per_CPU_Get();
0176   uint32_t cpu_index_self = _Per_CPU_Get_index(cpu_self);
0177 
0178   ++ctx->counters[cpu_index_self].value;
0179   _Per_CPU_Add_job(cpu_self, &ctx->jobs[cpu_index_self][next_job]);
0180 }
0181 
/* Flood job 0: count and re-arm with job 1 */
static void counter_0_handler(void *arg)
{
  counter_handler(arg, 1);
}
0186 
/* Job context binding counter_0_handler() to the shared test instance */
static const Per_CPU_Job_context counter_0_job_context = {
  .handler = counter_0_handler,
  .arg = &test_instance
};
0191 
/* Flood job 1: count and re-arm with job 0 */
static void counter_1_handler(void *arg)
{
  counter_handler(arg, 0);
}
0196 
/* Job context binding counter_1_handler() to the shared test instance */
static const Per_CPU_Job_context counter_1_job_context = {
  .handler = counter_1_handler,
  .arg = &test_instance
};
0201 
/*
 * First synchronization job: runs on the flooded processor while it is
 * servicing the IPI.  Submitting sync_jobs[1] from here makes the message
 * pending again, which forces one more service pass (see the (E)/(F)
 * commentary in wait_for_ipi_done()).
 */
static void sync_0_handler(void *arg)
{
  test_context *ctx = arg;

  _Per_CPU_Submit_job(_Per_CPU_Get(), &ctx->sync_jobs[1]);

  /* (E) */
  barrier(ctx, &ctx->worker_barrier_state);
}
0211 
/*
 * Second synchronization job: its execution proves the re-pended IPI was
 * serviced; meet the main processor at (F).
 */
static void sync_1_handler(void *arg)
{
  test_context *ctx = arg;

  /* (F) */
  barrier(ctx, &ctx->worker_barrier_state);
}
0219 
/* Job context binding sync_0_handler() to the shared test instance */
static const Per_CPU_Job_context sync_0_context = {
  .handler = sync_0_handler,
  .arg = &test_instance
};

/* Job context binding sync_1_handler() to the shared test instance */
static const Per_CPU_Job_context sync_1_context = {
  .handler = sync_1_handler,
  .arg = &test_instance
};
0229 
/*
 * Waits until the flood of SMP_MESSAGE_PERFORM_JOBS messages to the given
 * processor is fully serviced.  Uses the two sync jobs and rendezvous points
 * (E)/(F) to guarantee at least one complete service pass after the last
 * message was sent, then spins until the target leaves interrupt context.
 */
static void wait_for_ipi_done(test_context *ctx, Per_CPU_Control *cpu)
{
  unsigned long done;

  ctx->sync_jobs[0].context = &sync_0_context;
  ctx->sync_jobs[1].context = &sync_1_context;
  _Per_CPU_Submit_job(cpu, &ctx->sync_jobs[0]);

  /*
   * (E)
   *
   * At this point, the IPI is currently serviced.  Depending on the target and
   * timing conditions, the IPI may be active and pending.  The main processor
   * will no longer make this IPI pending after this point.  Let the
   * sync_0_handler() make it pending again to go to (F).
   */
  barrier(ctx, &ctx->main_barrier_state);

  /* (F) */
  barrier(ctx, &ctx->main_barrier_state);

  /* Make sure that a potential counter_handler() finished */
  while (cpu->isr_nest_level != 0) {
    RTEMS_COMPILER_MEMORY_BARRIER();
  }

  /* The second sync job must have completed by now */
  done = _Atomic_Load_ulong( &ctx->sync_jobs[1].done, ATOMIC_ORDER_ACQUIRE );
  rtems_test_assert( done == PER_CPU_JOB_DONE );
}
0259 
/*
 * Floods every processor in turn with SMP_MESSAGE_PERFORM_JOBS messages.
 * Each processor carries a self-re-arming pair of counter jobs, so every
 * serviced message increments its counter.  After each flood, the counters of
 * all OTHER processors are compared against snapshots taken before the flood,
 * checking that messages addressed to one processor do not trigger job
 * processing on any other processor.
 */
static void test_send_message_flood(
  test_context *ctx,
  uint32_t cpu_count
)
{
  uint32_t cpu_index_self = rtems_scheduler_get_processor();
  uint32_t cpu_index;

  /* Arm every processor with the self-re-arming counter job pair */
  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index(cpu_index);

    ctx->jobs[cpu_index][0].context = &counter_0_job_context;
    ctx->jobs[cpu_index][1].context = &counter_1_job_context;
    _Per_CPU_Add_job(cpu, &ctx->jobs[cpu_index][0]);
  }

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    Per_CPU_Control *cpu;
    uint32_t i;

    cpu = _Per_CPU_Get_by_index(cpu_index);

    /* Snapshot the counters of all other processors before the flood */
    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        ctx->copy_counters[i] = ctx->counters[i].value;
      }
    }

    for (i = 0; i < 100000; ++i) {
      _SMP_Send_message(cpu, SMP_MESSAGE_PERFORM_JOBS);
    }

    /* No wait needed when flooding ourselves; messages are handled inline */
    if (cpu_index != cpu_index_self) {
      wait_for_ipi_done(ctx, cpu);
    }

    /* Only the flooded processor's counter may have changed */
    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        rtems_test_assert(ctx->copy_counters[i] == ctx->counters[i].value);
      }
    }
  }

  /* Report per-processor IPI counts for all online processors */
  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    rtems_test_assert(
      _Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );

    printf(
      "inter-processor interrupts for processor %"
        PRIu32 "%s: %" PRIu32 "\n",
      cpu_index,
      cpu_index == cpu_index_self ? " (main)" : "",
      ctx->counters[cpu_index].value
    );
  }

  /* Processors beyond the configured count must be offline */
  for (; cpu_index < CPU_COUNT; ++cpu_index) {
    rtems_test_assert(
      !_Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );
  }
}
0323 
0324 static void test(void)
0325 {
0326   test_context *ctx = &test_instance;
0327   uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
0328   uint32_t cpu_index_self;
0329 
0330   for (cpu_index_self = 0; cpu_index_self < cpu_count; ++cpu_index_self) {
0331     test_send_message_while_processing_a_message(ctx, cpu_index_self, cpu_count);
0332   }
0333 
0334   test_send_message_flood(ctx, cpu_count);
0335 }
0336 
/* Initialization task: run the test and terminate the executable (arg unused) */
static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  test();

  TEST_END();
  rtems_test_exit(0);
}
0346 
/* Application configuration: no clock tick needed, simple polled console */
#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* Support up to CPU_COUNT processors (actual count is probed at run time) */
#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

/* Only the Init task exists */
#define CONFIGURE_MAXIMUM_TASKS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>