Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:21

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSTestFrameworkImpl
0007  *
0008  * @brief This source file contains the implementation of
0009  *   T_interrupt_test().
0010  */
0011 
0012 /*
0013  * Copyright (C) 2020 embedded brains GmbH & Co. KG
0014  *
0015  * Redistribution and use in source and binary forms, with or without
0016  * modification, are permitted provided that the following conditions
0017  * are met:
0018  * 1. Redistributions of source code must retain the above copyright
0019  *    notice, this list of conditions and the following disclaimer.
0020  * 2. Redistributions in binary form must reproduce the above copyright
0021  *    notice, this list of conditions and the following disclaimer in the
0022  *    documentation and/or other materials provided with the distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0026  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0027  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0028  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0031  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0032  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0033  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0034  * POSSIBILITY OF SUCH DAMAGE.
0035  */
0036 
0037 #ifdef HAVE_CONFIG_H
0038 #include "config.h"
0039 #endif
0040 
0041 #include <rtems/test.h>
0042 
0043 #include <rtems/score/atomic.h>
0044 #include <rtems/score/percpu.h>
0045 #include <rtems/score/thread.h>
0046 #include <rtems/score/timecounter.h>
0047 #include <rtems/score/timestampimpl.h>
0048 #include <rtems/score/userextimpl.h>
0049 #include <rtems/score/watchdogimpl.h>
0050 
0051 #ifdef RTEMS_SMP
0052 #include <rtems/score/smpimpl.h>
0053 #endif
0054 
/*
 * Type of the user-provided interrupt handler.
 *
 * NOTE(review): this typedef is not referenced in the visible part of this
 * file; the context member below spells out the full function pointer type.
 */
typedef T_interrupt_test_state (*T_interrupt_test_handler)(void *);

/* Number of busy-count samples used by the damped bisection in T_interrupt_test() */
#define T_INTERRUPT_SAMPLE_COUNT 8

/*
 * Run-time state of an interrupt test.  A single static instance
 * (T_interrupt_instance) is shared by all test runs.
 */
typedef struct {
    /* Busy count which consumes roughly one clock tick, calibrated once */
    uint_fast32_t one_tick_busy;
    /*
     * Reference uptime in nanoseconds close to a clock tick boundary; a
     * value of zero means the context was not yet calibrated, see
     * T_interrupt_setup() and T_interrupt_init_once().
     */
    int64_t t0;
    /* Thread which executes the test action */
    Thread_Control *self;
    /* Holds the current T_interrupt_test_state of the iteration */
    Atomic_Uint state;
    /*
     * NOTE(review): prepare and action are never assigned in the visible
     * code; T_interrupt_test() invokes the config callouts directly.
     */
    void (*prepare)(void *);
    void (*action)(void *);
    /* Handler invoked by the per-tick watchdog, see T_interrupt_watchdog() */
    T_interrupt_test_state (*interrupt)(void *);
    /* Callout invoked when the test thread is switched away */
    void (*blocked)(void *);
    /* Argument passed to the user-provided handlers */
    void *arg;
#ifdef RTEMS_SMP
    /*
     * Job used to defer the blocked callout out of the thread switch
     * extension, see T_interrupt_thread_switch().
     */
    Per_CPU_Job job;
    Per_CPU_Job_context job_context;
#endif
    /* Watchdog which fires the interrupt handler once per clock tick */
    Watchdog_Control wdg;
    /* User extension set providing the thread switch callout */
    User_extensions_Control ext;
    /* Fixture node pushed by T_interrupt_test() */
    T_fixture_node node;
} T_interrupt_context;
0077 
typedef struct {
    int64_t t;
    int64_t d;
} T_interrupt_clock_time;

/*
 * Sorts the array of n clock time records in place by ascending d member.
 * A simple bubble sort is sufficient here since the sample arrays are tiny
 * (at most a dozen elements).
 */
static void
T_interrupt_sort(T_interrupt_clock_time *ct, size_t n)
{
    size_t pass;

    for (pass = 1; pass < n; ++pass) {
        size_t k;

        /* After each pass, the largest remaining element is in place */
        for (k = 0; k < n - pass; ++k) {
            if (ct[k + 1].d < ct[k].d) {
                T_interrupt_clock_time swap;

                swap = ct[k];
                ct[k] = ct[k + 1];
                ct[k + 1] = swap;
            }
        }
    }
}
0103 
/*
 * Returns an uptime in nanoseconds which lies close to a clock tick
 * boundary.  The value serves as the time reference (ctx->t0) for placing
 * the test action within the tick interval in T_interrupt_test().
 */
static int64_t
T_interrupt_time_close_to_tick(void)
{
    Watchdog_Interval c0;
    Watchdog_Interval c1;
    T_interrupt_clock_time ct[12];
    Timestamp_Control t;
    int32_t ns_per_tick;
    size_t i;
    size_t n;

    ns_per_tick = (int32_t)_Watchdog_Nanoseconds_per_tick;
    n = RTEMS_ARRAY_SIZE(ct);
    c0 = _Watchdog_Ticks_since_boot;

    for (i = 0; i < n; ++i) {
        /* Spin until the tick counter advances, then record the uptime;
           this samples a timestamp taken just after a tick boundary */
        do {
            c1 = _Watchdog_Ticks_since_boot;
            t = _Timecounter_Sbinuptime();
        } while (c0 == c1);

        c0 = c1;
        ct[i].t = sbttons(t);
    }

    /* Offset of each sample relative to ct[1], reduced modulo the tick
       interval and wrapped into roughly (-tick/2, +tick/2]; ct[0] is
       discarded since its spin loop may have started mid-tick */
    for (i = 1; i < n; ++i) {
        int64_t d;

        d = (ct[i].t - ct[1].t) % ns_per_tick;

        if (d > ns_per_tick / 2) {
            d -= ns_per_tick;
        }

        ct[i].d = d;
    }

    /*
     * Use the median and not the arithmetic mean since on simulator
     * platforms there may be outliers.
     */
    T_interrupt_sort(&ct[1], n - 1);
    return ct[1 + (n - 1) / 2].t;
}
0148 
/*
 * Watchdog handler which fires once per clock tick while the test fixture
 * is active.  It re-arms itself for the next tick, invokes the
 * user-provided interrupt handler, and publishes the handler's resulting
 * state — but only if the test is still in the action phase.
 */
static void
T_interrupt_watchdog(Watchdog_Control *wdg)
{
    T_interrupt_context *ctx;
    ISR_Level level;
    T_interrupt_test_state state;
    unsigned int expected;

    ctx = RTEMS_CONTAINER_OF(wdg, T_interrupt_context, wdg);

    /* Re-insert for the next tick on the processor owning the watchdog */
    _ISR_Local_disable(level);
    _Watchdog_Per_CPU_insert_ticks(&ctx->wdg,
        _Watchdog_Get_CPU(&ctx->wdg), 1);
    _ISR_Local_enable(level);

    state = (*ctx->interrupt)(ctx->arg);

    /* Transition ACTION -> handler state; if the state already changed
       (for example the action completed), keep the existing state */
    expected = T_INTERRUPT_TEST_ACTION;
    _Atomic_Compare_exchange_uint(&ctx->state, &expected,
        state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);
}
0170 
0171 static void
0172 T_interrupt_watchdog_insert(T_interrupt_context *ctx)
0173 {
0174     ISR_Level level;
0175 
0176     _ISR_Local_disable(level);
0177     _Watchdog_Per_CPU_insert_ticks(&ctx->wdg, _Per_CPU_Get(), 1);
0178     _ISR_Local_enable(level);
0179 }
0180 
0181 static void
0182 T_interrupt_watchdog_remove(T_interrupt_context *ctx)
0183 {
0184     ISR_Level level;
0185 
0186     _ISR_Local_disable(level);
0187     _Watchdog_Per_CPU_remove_ticks(&ctx->wdg);
0188     _ISR_Local_enable(level);
0189 }
0190 
/*
 * Performs the one-time calibration of the test context: the clock tick
 * reference time and the busy count which consumes roughly one clock tick
 * (see T_get_one_clock_tick_busy()).  Triggered by T_interrupt_setup()
 * while ctx->t0 is still zero.
 */
static void
T_interrupt_init_once(T_interrupt_context *ctx)
{
    ctx->t0 = T_interrupt_time_close_to_tick();
    ctx->one_tick_busy = T_get_one_clock_tick_busy();
}
0197 
0198 static T_interrupt_test_state
0199 T_interrupt_continue(void *arg)
0200 {
0201     (void)arg;
0202     return T_INTERRUPT_TEST_CONTINUE;
0203 }
0204 
/* Default blocked handler of the context.  It intentionally does nothing. */
static void
T_interrupt_do_nothing(void *arg)
{
    /* Deliberately empty */
    (void)arg;
}
0210 
#ifdef RTEMS_SMP
/*
 * Per-CPU job handler which forwards to the user-provided blocked callout.
 * In SMP configurations the callout cannot be invoked directly from the
 * thread switch extension, see T_interrupt_thread_switch().
 */
static void
T_interrupt_blocked(void *arg)
{
    T_interrupt_context *ctx;

    ctx = arg;
    (*ctx->blocked)(ctx->arg);
}
#endif
0221 
static void T_interrupt_thread_switch(Thread_Control *, Thread_Control *);

/*
 * The singleton test context.  The interrupt and blocked handlers are
 * initialized with no-op defaults so that the watchdog handler and the
 * thread switch extension always call valid functions.
 */
static T_interrupt_context T_interrupt_instance = {
    .interrupt = T_interrupt_continue,
    .blocked = T_interrupt_do_nothing,
#ifdef RTEMS_SMP
    /* The job defers the blocked callout to inter-processor interrupt
       context, see T_interrupt_thread_switch() */
    .job = {
        .context = &T_interrupt_instance.job_context
    },
    .job_context = {
        .handler = T_interrupt_blocked,
        .arg = &T_interrupt_instance
    },
#endif
    .wdg = WATCHDOG_INITIALIZER(T_interrupt_watchdog),
    .ext = {
        .Callouts = {
            .thread_switch = T_interrupt_thread_switch
        }
    }
};
0243 
/*
 * Atomically moves the test state from expected_state to desired_state.
 *
 * Returns the state observed at the time of the operation; this equals
 * expected_state if and only if the transition took place.
 */
T_interrupt_test_state
T_interrupt_test_change_state(T_interrupt_test_state expected_state,
    T_interrupt_test_state desired_state)
{
    T_interrupt_context *ctx;
    unsigned int expected;

    ctx = &T_interrupt_instance;
    expected = expected_state;
    /* On failure, the compare-exchange stores the actual state in expected */
    _Atomic_Compare_exchange_uint(&ctx->state, &expected,
        desired_state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);

    return (T_interrupt_test_state)expected;
}
0258 
0259 T_interrupt_test_state
0260 T_interrupt_test_get_state(void)
0261 {
0262     T_interrupt_context *ctx;
0263 
0264     ctx = &T_interrupt_instance;
0265     return (T_interrupt_test_state)_Atomic_Load_uint(&ctx->state,
0266         ATOMIC_ORDER_RELAXED);
0267 }
0268 
0269 void
0270 T_interrupt_test_busy_wait_for_interrupt(void)
0271 {
0272     T_interrupt_context *ctx;
0273     unsigned int state;
0274 
0275     ctx = &T_interrupt_instance;
0276 
0277     do {
0278         state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
0279     } while (state == T_INTERRUPT_TEST_ACTION);
0280 }
0281 
/*
 * Thread switch extension which notifies the test when the thread running
 * the test action is switched away while an iteration is in progress
 * (state is no longer T_INTERRUPT_TEST_INITIAL).  It then invokes the
 * user-provided blocked callout, directly or via an SMP job.
 */
static void
T_interrupt_thread_switch(Thread_Control *executing, Thread_Control *heir)
{
    T_interrupt_context *ctx;

    (void)heir;
    ctx = &T_interrupt_instance;

    /* Only switches away from the thread running the action matter */
    if (ctx->self == executing) {
        T_interrupt_test_state state;

        state = (T_interrupt_test_state)_Atomic_Load_uint(&ctx->state,
            ATOMIC_ORDER_RELAXED);

        if (state != T_INTERRUPT_TEST_INITIAL) {
#ifdef RTEMS_SMP
            Per_CPU_Control *cpu_self;

            /*
             * In SMP configurations, the thread switch extension
             * runs in a very restricted environment.  Interrupts
             * are disabled and the caller owns the per-CPU lock.
             * In order to avoid deadlocks at SMP lock level, we
             * have to use an SMP job which runs later in the
             * context of the inter-processor interrupt.
             */
            cpu_self = _Per_CPU_Get();
            _Per_CPU_Submit_job(cpu_self, &ctx->job);
#else
            (*ctx->blocked)(ctx->arg);
#endif
        }
    }
}
0316 
/*
 * Prepares the singleton test context for one T_interrupt_test() run:
 * validates the configuration, installs the user handlers, performs the
 * one-time calibration, registers the thread switch extension, and arms
 * the per-tick watchdog.
 *
 * Returns the initialized context.
 */
static T_interrupt_context *
T_interrupt_setup(const T_interrupt_test_config *config, void *arg)
{
    T_interrupt_context *ctx;

    T_quiet_assert_not_null(config->action);
    T_quiet_assert_not_null(config->interrupt);
    ctx = &T_interrupt_instance;
    ctx->self = _Thread_Get_executing();
    ctx->arg = arg;
    ctx->interrupt = config->interrupt;

    /* Keep the no-op default if no blocked callout was configured */
    if (config->blocked != NULL) {
        ctx->blocked = config->blocked;
    }

    /* t0 == 0 marks a context which was never calibrated */
    if (ctx->t0 == 0) {
        T_interrupt_init_once(ctx);
    }

    _User_extensions_Add_set(&ctx->ext);
    T_interrupt_watchdog_insert(ctx);
    return ctx;
}
0341 
/*
 * Fixture teardown: restores the no-op default handlers, disarms the
 * watchdog, and removes the thread switch extension.  The handlers are
 * reset first — presumably so that a watchdog tick firing during the
 * teardown still invokes valid functions (NOTE(review): confirm).
 */
static void
T_interrupt_teardown(void *arg)
{
    T_interrupt_context *ctx;

    ctx = arg;
    ctx->interrupt = T_interrupt_continue;
    ctx->blocked = T_interrupt_do_nothing;
    T_interrupt_watchdog_remove(ctx);
    _User_extensions_Remove_set(&ctx->ext);
    ctx->self = NULL;
    ctx->arg = NULL;
}
0355 
/*
 * Fixture pushed by T_interrupt_test() so that the context is cleaned up
 * even if the test body stops early.
 */
static const T_fixture T_interrupt_fixture = {
    .teardown = T_interrupt_teardown,
    .initial_context = &T_interrupt_instance
};
0360 
/*
 * Tries to get an interrupt exactly during the execution of the action
 * handler.
 *
 * Each iteration starts the action at a calibrated offset within the
 * clock tick interval after a busy wait; the per-tick watchdog then runs
 * the interrupt handler.  A damped bisection over
 * T_INTERRUPT_SAMPLE_COUNT busy-count samples steers the interrupt time
 * point into the action: EARLY results lower the upper bounds, LATE
 * results raise the lower bounds.
 *
 * Returns T_INTERRUPT_TEST_DONE if an iteration reported completion,
 * otherwise T_INTERRUPT_TEST_TIMEOUT after config->max_iteration_count
 * iterations.
 */
T_interrupt_test_state
T_interrupt_test(const T_interrupt_test_config *config, void *arg)
{
    T_interrupt_context *ctx;
    uint_fast32_t lower_bound[T_INTERRUPT_SAMPLE_COUNT];
    uint_fast32_t upper_bound[T_INTERRUPT_SAMPLE_COUNT];
    uint_fast32_t lower_sum;
    uint_fast32_t upper_sum;
    int32_t ns_per_tick;
    size_t sample;
    uint32_t iter;

    ctx = T_interrupt_setup(config, arg);
    T_push_fixture(&ctx->node, &T_interrupt_fixture);
    ns_per_tick = (int32_t)_Watchdog_Nanoseconds_per_tick;

    /* Initial bisection interval of every sample is [0, one_tick_busy] */
    lower_sum = 0;
    upper_sum = T_INTERRUPT_SAMPLE_COUNT * ctx->one_tick_busy;

    for (sample = 0; sample < T_INTERRUPT_SAMPLE_COUNT; ++sample) {
        lower_bound[sample] = 0;
        upper_bound[sample] = ctx->one_tick_busy;
    }

    sample = 0;

    for (iter = 0; iter < config->max_iteration_count; ++iter) {
        T_interrupt_test_state state;
        int64_t t;
        int64_t d;
        Timestamp_Control s1;
        Timestamp_Control s0;
        uint_fast32_t busy;
        uint_fast32_t delta;

        if (config->prepare != NULL) {
            (*config->prepare)(arg);
        }

        /*
         * We use some sort of a damped bisection to find the right
         * interrupt time point.
         */
        busy = (lower_sum + upper_sum) /
            (2 * T_INTERRUPT_SAMPLE_COUNT);

        /* Offset of now relative to the tick reference t0, modulo tick */
        t = sbttons(_Timecounter_Sbinuptime());
        d = (t - ctx->t0) % ns_per_tick;
        t += ns_per_tick / 4 - d;

        if (d > ns_per_tick / 8) {
            t += ns_per_tick;
        }

        /*
         * The s1 value is a future time point close to 25% of a clock
         * tick interval.
         */
        s1 = nstosbt(t);

        /*
         * The path from here to the action call must avoid anything
         * which can cause jitters.  We wait until 25% of the clock
         * tick interval are elapsed using the timecounter.  Then we do
         * a busy wait and call the action.  The interrupt time point
         * is controlled by the busy count.
         */

        do {
            s0 = _Timecounter_Sbinuptime();
        } while (s0 < s1);

        /* Enter the action phase; the watchdog handler observes this */
        _Atomic_Store_uint(&ctx->state, T_INTERRUPT_TEST_ACTION,
            ATOMIC_ORDER_RELAXED);
        T_busy(busy);
        (*config->action)(arg);

        /* Leave the action phase and fetch the state set by the handler */
        state = (T_interrupt_test_state)
            _Atomic_Exchange_uint(&ctx->state,
            T_INTERRUPT_TEST_INITIAL, ATOMIC_ORDER_RELAXED);

        if (state == T_INTERRUPT_TEST_DONE) {
            break;
        }

        /* Adjust the lower/upper bound of the bisection interval */
        if (state == T_INTERRUPT_TEST_EARLY) {
            uint_fast32_t lower;

            upper_sum -= upper_bound[sample];
            upper_sum += busy;
            upper_bound[sample] = busy;

            /* Round down to make sure no underflow happens */
            lower = lower_bound[sample];
            delta = lower / 32;
            lower_sum -= delta;
            lower_bound[sample] = lower - delta;

            sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
        } else if (state == T_INTERRUPT_TEST_LATE ||
            state == T_INTERRUPT_TEST_ACTION) {
            uint_fast32_t upper;

            /*
             * If the state is T_INTERRUPT_TEST_ACTION, then there
             * was probably no interrupt during the action, so the
             * interrupt would be late.
             */

            lower_sum -= lower_bound[sample];
            lower_sum += busy;
            lower_bound[sample] = busy;

            /*
             * The one tick busy count value is not really
             * trustable on some platforms.  Allow the upper bound
             * to grow over this value in time.
             */
            upper = upper_bound[sample];
            delta = (upper + 31) / 32;
            upper_sum += delta;
            upper_bound[sample] = upper + delta;

            sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
        }
    }

    T_pop_fixture();

    /* The loop never observed T_INTERRUPT_TEST_DONE */
    if (iter == config->max_iteration_count) {
        return T_INTERRUPT_TEST_TIMEOUT;
    }

    return T_INTERRUPT_TEST_DONE;
}