Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:43

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /*
0004  * Copyright (C) 2016, 2020 embedded brains GmbH & Co. KG
0005  *
0006  * Redistribution and use in source and binary forms, with or without
0007  * modification, are permitted provided that the following conditions
0008  * are met:
0009  * 1. Redistributions of source code must retain the above copyright
0010  *    notice, this list of conditions and the following disclaimer.
0011  * 2. Redistributions in binary form must reproduce the above copyright
0012  *    notice, this list of conditions and the following disclaimer in the
0013  *    documentation and/or other materials provided with the distribution.
0014  *
0015  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0016  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0017  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0018  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0019  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0020  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0021  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0022  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0023  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0024  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0025  * POSSIBILITY OF SUCH DAMAGE.
0026  */
0027 
0028 #ifdef HAVE_CONFIG_H
0029 #include "config.h"
0030 #endif
0031 
0032 #include <tmacros.h>
0033 
0034 #include <rtems.h>
0035 
0036 const char rtems_test_name[] = "SMPSCHEDEDF 2";
0037 
0038 #define CPU_COUNT 2
0039 
0040 #define TASK_COUNT 5
0041 
/* EDF priority of task i: base priority two plus the task index */
#define P(i) (UINT32_C(2) + (i))
0043 
/* Affinity mask with bit 0 set iff cpu0 != 0 and bit 1 set iff cpu1 != 0 */
#define A(cpu0, cpu1) (((cpu1) << 1) | (cpu0))
0045 
/*
 * Symbolic indices for the five worker tasks.  IDLE is used in expected
 * processor allocations to denote that the idle thread is the heir.
 */
typedef enum {
  T0,
  T1,
  T2,
  T3,
  T4,
  IDLE
} task_index;
0054 
/*
 * One step of a test scenario: an operation applied to a single task
 * together with the processor allocations expected immediately afterwards.
 */
typedef struct {
  /* Operation to carry out on the task selected by index */
  enum {
    KIND_RESET,
    KIND_SET_PRIORITY,
    KIND_SET_AFFINITY,
    KIND_BLOCK,
    KIND_UNBLOCK
  } kind;

  /* Task the operation acts on (unused for KIND_RESET) */
  task_index index;

  /* Operation parameter; the member consulted depends on kind */
  struct {
    rtems_task_priority priority;
    uint32_t cpu_set;
  } data;

  /* Expected heir (task index or IDLE) of each processor after the step */
  uint8_t expected_cpu_allocations[CPU_COUNT];
} test_action;
0073 
/* State shared between the master task and the action timer routine */
typedef struct {
  rtems_id timer_id;              /* Timer which drives the test actions */
  rtems_id master_id;             /* Master task, suspended while actions run */
  rtems_id task_ids[TASK_COUNT];  /* Identifiers of the worker tasks */
  size_t action_index;            /* Index of the next test_actions[] entry */
} test_context;
0080 
/* Restore the defined initial state; both processors end up idle */
#define RESET \
  { \
    KIND_RESET, \
    0, \
    { 0 }, \
    { IDLE, IDLE } \
  }

/* Set the priority of task index; expect cpu0/cpu1 as heirs afterwards */
#define SET_PRIORITY(index, prio, cpu0, cpu1) \
  { \
    KIND_SET_PRIORITY, \
    index, \
    { .priority = prio }, \
    { cpu0, cpu1 } \
  }

/* Set the affinity of task index; expect cpu0/cpu1 as heirs afterwards */
#define SET_AFFINITY(index, aff, cpu0, cpu1) \
  { \
    KIND_SET_AFFINITY, \
    index, \
    { .cpu_set = aff }, \
    { cpu0, cpu1 } \
  }

/* Suspend task index; expect cpu0/cpu1 as heirs afterwards */
#define BLOCK(index, cpu0, cpu1) \
  { \
    KIND_BLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }

/* Resume task index; expect cpu0/cpu1 as heirs afterwards */
#define UNBLOCK(index, cpu0, cpu1) \
  { \
    KIND_UNBLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }
0120 
/*
 * Scripted test scenarios, executed in order from the timer service routine.
 * The last two columns of every step are the expected heirs of processor 0
 * and processor 1 after the step has been carried out.
 */
static const test_action test_actions[] = {
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  SET_PRIORITY( T1,  P(2),      T0,   T1),
  SET_PRIORITY( T3,  P(1),      T0,   T3),
  BLOCK(        T3,             T0,   T1),
  SET_AFFINITY( T1,  A(1, 1),   T0,   T1),
  SET_AFFINITY( T1,  A(1, 0),   T1,   T0),
  SET_AFFINITY( T1,  A(1, 1),   T1,   T0),
  SET_AFFINITY( T1,  A(1, 0),   T1,   T0),
  SET_AFFINITY( T1,  A(0, 1),   T0,   T1),
  BLOCK(        T0,           IDLE,   T1),
  UNBLOCK(      T0,             T0,   T1),
  BLOCK(        T1,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  /*
   * Show that FIFO order is honoured across all threads of the same priority.
   */
  RESET,
  SET_PRIORITY( T1,  P(0),    IDLE, IDLE),
  SET_PRIORITY( T2,  P(1),    IDLE, IDLE),
  SET_PRIORITY( T3,  P(1),    IDLE, IDLE),
  SET_AFFINITY( T3,  A(1, 0), IDLE, IDLE),
  SET_PRIORITY( T4,  P(1),    IDLE, IDLE),
  SET_AFFINITY( T4,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  UNBLOCK(      T2,             T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  UNBLOCK(      T4,             T0,   T1),
  BLOCK(        T1,             T0,   T2),
  BLOCK(        T2,             T3,   T0),
  BLOCK(        T3,             T4,   T0),
  /*
   * Schedule a high priority affine thread directly with a low priority affine
   * thread in the corresponding ready queue.  In this case we, remove the
   * affine ready queue in _Scheduler_EDF_SMP_Allocate_processor().
   */
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  SET_PRIORITY( T1,  P(2),      T0,   T1),
  SET_AFFINITY( T3,  A(0, 1),   T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  SET_PRIORITY( T2,  P(1),      T0,   T1),
  SET_AFFINITY( T2,  A(0, 1),   T0,   T1),
  UNBLOCK(      T2,             T0,   T2),
  BLOCK(        T1,             T0,   T2),
  BLOCK(        T2,             T0,   T3),
  /* Force migration of a higher priority one-to-all thread */
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  SET_AFFINITY( T1,  A(1, 0),   T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  /*
   * Block a one-to-one thread while having a non-empty affine ready queue on
   * the same processor.
   */
  RESET,
  SET_AFFINITY( T1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( T3,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  UNBLOCK(      T2,             T1,   T0),
  UNBLOCK(      T3,             T1,   T0),
  BLOCK(        T1,             T2,   T0),
  BLOCK(        T0,             T3,   T2),
  /*
   * Make sure that a one-to-one thread does not get the wrong processor
   * allocated after selecting the highest ready thread.
   */
  RESET,
  SET_AFFINITY( T1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( T2,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  UNBLOCK(      T2,             T1,   T0),
  BLOCK(        T0,             T1, IDLE),
  RESET
};
0203 
/* Singleton test context instance */
static test_context test_instance;
0205 
0206 static void set_priority(rtems_id id, rtems_task_priority prio)
0207 {
0208   rtems_status_code sc;
0209 
0210   sc = rtems_task_set_priority(id, prio, &prio);
0211   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
0212 }
0213 
0214 static void set_affinity(rtems_id id, uint32_t cpu_set_32)
0215 {
0216   rtems_status_code sc;
0217   cpu_set_t cpu_set;
0218   size_t i;
0219 
0220   CPU_ZERO(&cpu_set);
0221 
0222   for (i = 0; i < CPU_COUNT; ++i) {
0223     if ((cpu_set_32 & (UINT32_C(1) << i)) != 0) {
0224       CPU_SET(i, &cpu_set);
0225     }
0226   }
0227 
0228   sc = rtems_task_set_affinity(id, sizeof(cpu_set), &cpu_set);
0229   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
0230 }
0231 
/*
 * The goal of the reset() function is to bring back a defined initial system
 * state for each test case.  All tasks of the test shall be suspended.  The
 * idle threads shall be ordered in the scheduled chain according to the CPU
 * index.
 */
static void reset(test_context *ctx)
{
  rtems_status_code sc;
  size_t i;

  /* Restore the default priority and an all-processor affinity for each task */
  for (i = 0; i < TASK_COUNT; ++i) {
    set_priority(ctx->task_ids[i], P(i));
    set_affinity(ctx->task_ids[i], A(1, 1));
  }

  /* Suspend the tasks which are not needed to occupy a processor below */
  for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
    sc = rtems_task_suspend(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
  }

  /* Resume one task per processor; they may already be ready */
  for (i = 0; i < CPU_COUNT; ++i) {
    sc = rtems_task_resume(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
  }

  /*
   * Order the idle threads explicitly.  Test cases may move the idle threads
   * around.  We have to ensure that the idle threads are ordered according to
   * the CPU index, otherwise the processor allocations cannot be specified for
   * a test case.  The idle threads of a scheduler have all the same priority,
   * so we have to take the FIFO ordering within a priority group into account.
   */
  for (i = 0; i < CPU_COUNT; ++i) {
    const Per_CPU_Control *c;
    const Thread_Control *h;

    /* Suspend the heirs in reverse CPU index order to re-establish FIFO order */
    c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
    h = c->heir;

    sc = rtems_task_suspend(h->Object.id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
0276 
0277 static void check_cpu_allocations(test_context *ctx, const test_action *action)
0278 {
0279   size_t i;
0280 
0281   for (i = 0; i < CPU_COUNT; ++i) {
0282     task_index e;
0283     const Per_CPU_Control *c;
0284     const Thread_Control *h;
0285 
0286     e = action->expected_cpu_allocations[i];
0287     c = _Per_CPU_Get_by_index(i);
0288     h = c->heir;
0289 
0290     if (e != IDLE) {
0291       rtems_test_assert(h->Object.id == ctx->task_ids[e]);
0292     } else {
0293       rtems_test_assert(h->is_idle);
0294     }
0295   }
0296 }
0297 
/*
 * Use a timer to execute the actions, since it runs with thread dispatching
 * disabled.  This is necessary to check the expected processor allocations.
 */
static void timer(rtems_id id, void *arg)
{
  test_context *ctx;
  rtems_status_code sc;
  size_t i;

  ctx = arg;
  i = ctx->action_index;

  /* Before the first action, park the master task until all actions are done */
  if (i == 0) {
    sc = rtems_task_suspend(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  if (i < RTEMS_ARRAY_SIZE(test_actions)) {
    const test_action *action = &test_actions[i];
    rtems_id task;

    ctx->action_index = i + 1;

    task = ctx->task_ids[action->index];

    /* Carry out the scripted operation for this step */
    switch (action->kind) {
      case KIND_SET_PRIORITY:
        set_priority(task, action->data.priority);
        break;
      case KIND_SET_AFFINITY:
        set_affinity(task, action->data.cpu_set);
        break;
      case KIND_BLOCK:
        sc = rtems_task_suspend(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      case KIND_UNBLOCK:
        sc = rtems_task_resume(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      default:
        rtems_test_assert(action->kind == KIND_RESET);
        reset(ctx);
        break;
    }

    /* Heir checks are valid here because thread dispatching is disabled */
    check_cpu_allocations(ctx, action);

    /* Re-arm the timer with its previous interval for the next action */
    sc = rtems_timer_reset(id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  } else {
    /* All actions done: wake the master task and signal completion */
    sc = rtems_task_resume(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_send(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
0357 
0358 static void do_nothing_task(rtems_task_argument arg)
0359 {
0360   (void) arg;
0361 
0362   while (true) {
0363     /* Do nothing */
0364   }
0365 }
0366 
/*
 * Create and start the worker tasks, run all scripted actions via the timer,
 * wait for completion, and tear everything down again.
 */
static void test(void)
{
  test_context *ctx;
  rtems_status_code sc;
  size_t i;

  ctx = &test_instance;

  ctx->master_id = rtems_task_self();

  /* Create and start one worker task per task_index value */
  for (i = 0; i < TASK_COUNT; ++i) {
    sc = rtems_task_create(
      rtems_build_name(' ', ' ', 'T', '0' + i),
      P(i),
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_ids[i]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_ids[i], do_nothing_task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* The timer service routine executes the test actions one per tick */
  sc = rtems_timer_create(
    rtems_build_name('A', 'C', 'T', 'N'),
    &ctx->timer_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_fire_after(ctx->timer_id, 1, timer, ctx);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Block until the timer routine signals that all actions were executed */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (i = 0; i < TASK_COUNT; ++i) {
    sc = rtems_task_delete(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_delete(ctx->timer_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
0412 
0413 static void Init(rtems_task_argument arg)
0414 {
0415   TEST_BEGIN();
0416 
0417   if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
0418     test();
0419   } else {
0420     puts("warning: wrong processor count to run the test");
0421   }
0422 
0423   TEST_END();
0424   rtems_test_exit(0);
0425 }
0426 
/* One millisecond clock ticks drive the action timer */
#define CONFIGURE_MICROSECONDS_PER_TICK 1000

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* One initialization task plus the worker tasks */
#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

/* The scheduler under test */
#define CONFIGURE_SCHEDULER_EDF_SMP

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

/* CONFIGURE_INIT must be defined before including confdefs.h */
#define CONFIGURE_INIT

#include <rtems/confdefs.h>