/* File indexing completed on 2025-05-11 08:24:21 */
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037 #include <rtems/test.h>
0038
0039 #include <alloca.h>
0040 #include <inttypes.h>
0041 #include <stdlib.h>
0042 #include <string.h>
0043
0044 #include <rtems.h>
0045 #include <rtems/score/assert.h>
0046
/* Event used by load worker tasks to signal the runner (master) task. */
#define WAKEUP_EVENT RTEMS_EVENT_0
0048
/*
 * Context of one background load worker task (one per processor).
 */
typedef struct {
	/* Back reference to the measurement context owning this worker. */
	struct T_measure_runtime_context *master;
	/* Identifier of the load worker task; zero if creation failed. */
	rtems_id id;
	/* Memory chunk the worker continuously writes to dirty the data cache. */
	volatile unsigned int *chunk;
} load_context;
0054
/*
 * Runtime measurement context.  Allocated as one block together with the
 * sample buffer, the cache dirty/fill chunk, and the load contexts (see
 * T_measure_runtime_create()).
 */
struct T_measure_runtime_context {
	/* Number of samples recorded per measurement variant. */
	size_t sample_count;
	/* Sample buffer, cache-line aligned. */
	T_ticks *samples;
	/* Data cache line size in bytes (fallback value if unknown). */
	size_t cache_line_size;
	/* Size of the dirty/fill chunk (twice the data cache size). */
	size_t chunk_size;
	/* Chunk used to fill or dirty the data cache of the runner. */
	volatile unsigned int *chunk;
	/* Identifier of the runner (master) task. */
	rtems_id runner;
	/* Number of load contexts (processor maximum at creation time). */
	uint32_t load_count;
	/* One load context per processor. */
	load_context *load_contexts;
#ifdef RTEMS_SMP
	/* Runner affinity saved at creation; restored in destroy(). */
	cpu_set_t cpus;
#endif
};
0068
/*
 * Write one word per cache line of the chunk so that the data cache ends up
 * full of dirty lines.  The token makes the stored values differ from call
 * to call; the return value feeds the next call to keep a data dependency.
 */
static unsigned int
dirty_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size, unsigned int token)
{
	size_t word_count;
	size_t step;
	size_t idx;

	word_count = chunk_size / sizeof(chunk[0]);
	step = cache_line_size / sizeof(chunk[0]);
	idx = 0;

	while (idx < word_count) {
		chunk[idx] = idx + token;
		idx += step;
	}

	return idx + token;
}
0086
0087 static void
0088 wait_for_worker(void)
0089 {
0090 rtems_event_set events;
0091 rtems_status_code sc;
0092
0093 sc = rtems_event_receive(WAKEUP_EVENT, RTEMS_EVENT_ALL | RTEMS_WAIT,
0094 RTEMS_NO_TIMEOUT, &events);
0095 _Assert(sc == RTEMS_SUCCESSFUL);
0096 (void)sc;
0097 }
0098
0099 static void
0100 wakeup_master(const T_measure_runtime_context *ctx)
0101 {
0102 rtems_status_code sc;
0103
0104 sc = rtems_event_send(ctx->runner, WAKEUP_EVENT);
0105 _Assert(sc == RTEMS_SUCCESSFUL);
0106 (void)sc;
0107 }
0108
0109 static void
0110 suspend_worker(const load_context *lctx)
0111 {
0112 rtems_status_code sc;
0113
0114 sc = rtems_task_suspend(lctx->id);
0115 _Assert(sc == RTEMS_SUCCESSFUL);
0116 (void)sc;
0117 }
0118
0119 static void
0120 restart_worker(const load_context *lctx)
0121 {
0122 rtems_status_code sc;
0123
0124 sc = rtems_task_restart(lctx->id, (rtems_task_argument)lctx);
0125 _Assert(sc == RTEMS_SUCCESSFUL);
0126 (void)sc;
0127 wait_for_worker();
0128 }
0129
0130 static void
0131 load_worker(rtems_task_argument arg)
0132 {
0133 const load_context *lctx;
0134 T_measure_runtime_context *ctx;
0135 unsigned int token;
0136 volatile unsigned int *chunk;
0137 size_t chunk_size;
0138 size_t cache_line_size;
0139
0140 lctx = (const load_context *)arg;
0141 ctx = lctx->master;
0142 chunk = lctx->chunk;
0143 chunk_size = ctx->chunk_size;
0144 cache_line_size = ctx->cache_line_size;
0145 token = (unsigned int)rtems_scheduler_get_processor();
0146
0147 token = dirty_data_cache(chunk, chunk_size, cache_line_size, token);
0148 wakeup_master(ctx);
0149
0150 while (true) {
0151 token = dirty_data_cache(chunk, chunk_size, cache_line_size,
0152 token);
0153 }
0154 }
0155
0156 static void
0157 destroy(void *ptr)
0158 {
0159 const T_measure_runtime_context *ctx;
0160 uint32_t load;
0161 rtems_status_code sc;
0162
0163 ctx = ptr;
0164
0165 for (load = 0; load < ctx->load_count; ++load) {
0166 const load_context *lctx;
0167
0168 lctx = &ctx->load_contexts[load];
0169
0170
0171 if (lctx->id != 0) {
0172 sc = rtems_task_delete(lctx->id);
0173 _Assert(sc == RTEMS_SUCCESSFUL);
0174 (void)sc;
0175 }
0176 }
0177
0178 #ifdef RTEMS_SMP
0179 sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(ctx->cpus),
0180 &ctx->cpus);
0181 _Assert(sc == RTEMS_SUCCESSFUL);
0182 (void)sc;
0183 #endif
0184 }
0185
/*
 * Return the pointer advanced by the given byte offset.
 */
static void *
add_offset(const volatile void *p, uintptr_t o)
{
	uintptr_t base;

	base = (uintptr_t)p;
	return (void *)(base + o);
}
0191
/*
 * Return the pointer rounded up to the given alignment (a power of two).
 */
static void *
align_up(const volatile void *p, uintptr_t a)
{
	uintptr_t addr;

	addr = (uintptr_t)p;
	return (void *)RTEMS_ALIGN_UP(addr, a);
}
0197
/*
 * Create a runtime measurement context for the given configuration.
 *
 * Allocates the context, the sample buffer, the cache chunk, and the load
 * contexts in a single T_zalloc() block with destroy() as destructor.  On
 * SMP configurations, the runner is pinned to processor 0 (its previous
 * affinity is saved and restored by destroy()).  One load worker task is
 * created, started, and then suspended per processor.
 *
 * Returns the context, or NULL on allocation or task creation failure.
 */
T_measure_runtime_context *
T_measure_runtime_create(const T_measure_runtime_config *config)
{
	T_measure_runtime_context *ctx;
	size_t sample_size;
	size_t cache_line_size;
	size_t chunk_size;
	size_t load_size;
	uint32_t load_count;
	uint32_t i;
	rtems_status_code sc;
#ifdef RTEMS_SMP
	cpu_set_t cpu;
#endif

	sample_size = config->sample_count * sizeof(ctx->samples[0]);

	cache_line_size = rtems_cache_get_data_line_size();

	/* Fall back to a plausible line size if the BSP reports none. */
	if (cache_line_size == 0) {
		cache_line_size = 8;
	}

	chunk_size = rtems_cache_get_data_cache_size(0);

	if (chunk_size == 0) {
		chunk_size = cache_line_size;
	}

	/* Twice the data cache size so a full pass evicts everything. */
	chunk_size *= 2;

	load_count = rtems_scheduler_get_processor_maximum();
	load_size = load_count * sizeof(ctx->load_contexts[0]);

	/* The two extra cache lines provide room for alignment padding. */
	ctx = T_zalloc(sizeof(*ctx) + sample_size + load_size + chunk_size +
	    2 * cache_line_size, destroy);

	if (ctx == NULL) {
		return NULL;
	}

#ifdef RTEMS_SMP
	/* Save the current affinity, then pin the runner to processor 0. */
	sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(ctx->cpus),
	    &ctx->cpus);
	_Assert(sc == RTEMS_SUCCESSFUL);
	(void)sc;
	CPU_ZERO(&cpu);
	CPU_SET(0, &cpu);
	sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpu), &cpu);
	_Assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INVALID_NUMBER);
	(void)sc;
#endif

	/* Carve samples, chunk, and load contexts out of the one block. */
	ctx->sample_count = config->sample_count;
	ctx->samples = add_offset(ctx, sizeof(*ctx));
	ctx->samples = align_up(ctx->samples, cache_line_size);
	ctx->cache_line_size = cache_line_size;
	ctx->chunk_size = chunk_size;
	ctx->chunk = add_offset(ctx->samples, sample_size);
	ctx->chunk = align_up(ctx->chunk, cache_line_size);
	ctx->runner = rtems_task_self();
	ctx->load_count = load_count;
	ctx->load_contexts = add_offset(ctx->chunk, chunk_size);

	for (i = 0; i < load_count; ++i) {
		rtems_id id;
		load_context *lctx;
		rtems_task_priority max_prio;
		rtems_id scheduler;

		/* Skip processors not owned by any scheduler. */
		sc = rtems_scheduler_ident_by_processor(i, &scheduler);
		if (sc != RTEMS_SUCCESSFUL) {
			continue;
		}

		sc = rtems_task_create(rtems_build_name('L', 'O', 'A', 'D'),
		    1, RTEMS_MINIMUM_STACK_SIZE, RTEMS_DEFAULT_MODES,
		    RTEMS_DEFAULT_ATTRIBUTES, &id);
		if (sc != RTEMS_SUCCESSFUL) {
			/*
			 * NOTE(review): the partially initialized context is
			 * presumably cleaned up via the T_zalloc() destructor
			 * when the test case ends — confirm against the
			 * framework's resource tracking.
			 */
			return NULL;
		}

		lctx = &ctx->load_contexts[i];
		lctx->master = ctx;
		lctx->id = id;

		/* Give each worker its own chunk; share ours if that fails. */
		lctx->chunk = T_malloc(chunk_size);
		if (lctx->chunk == NULL) {
			lctx->chunk = ctx->chunk;
		}

		/*
		 * Run the worker at max_prio - 1 on the scheduler of its
		 * processor (a numerically high, i.e. low, priority).
		 */
		sc = rtems_scheduler_get_maximum_priority(scheduler, &max_prio);
		_Assert(sc == RTEMS_SUCCESSFUL);
		(void)sc;
		sc = rtems_task_set_scheduler(id, scheduler, max_prio - 1);
		_Assert(sc == RTEMS_SUCCESSFUL);
		(void)sc;

#ifdef RTEMS_SMP
		/* Pin the worker to its processor. */
		CPU_ZERO(&cpu);
		CPU_SET((int)i, &cpu);
		sc = rtems_task_set_affinity(id, sizeof(cpu), &cpu);
		_Assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INVALID_NUMBER);
		(void)sc;
#endif

		sc = rtems_task_start(id, load_worker,
		    (rtems_task_argument)lctx);
		_Assert(sc == RTEMS_SUCCESSFUL);
		(void)sc;

		/* Wait for the worker's ready signal, then park it. */
		wait_for_worker();
		suspend_worker(lctx);
	}

	return ctx;
}
0316
0317 static int
0318 cmp(const void *ap, const void *bp)
0319 {
0320 T_ticks a;
0321 T_ticks b;
0322
0323 a = *(const T_ticks *)ap;
0324 b = *(const T_ticks *)bp;
0325
0326 if (a < b) {
0327 return -1;
0328 } else if (a > b) {
0329 return 1;
0330 } else {
0331 return 0;
0332 }
0333 }
0334
/*
 * Emit the begin-of-measurement (M:B) and variant (M:V) report lines.  The
 * order of the two lines is part of the report protocol.
 */
static void
measure_variant_begin(const char *name, const char *variant)
{
	T_printf("M:B:%s\n", name);
	T_printf("M:V:%s\n", variant);
}
0341
0342 static T_time
0343 accumulate(const T_ticks *samples, size_t sample_count)
0344 {
0345 T_time a;
0346 size_t i;
0347
0348 a = 0;
0349
0350 for (i = 0; i < sample_count; ++i) {
0351 a += T_ticks_to_time(samples[i]);
0352 }
0353
0354 return a;
0355 }
0356
0357 static T_ticks
0358 median_absolute_deviation(T_ticks *samples, size_t sample_count)
0359 {
0360 T_ticks median;
0361 size_t i;
0362
0363 median = samples[sample_count / 2];
0364
0365 for (i = 0; i < sample_count / 2; ++i) {
0366 samples[i] = median - samples[i];
0367 }
0368
0369 for (; i < sample_count; ++i) {
0370 samples[i] = samples[i] - median;
0371 }
0372
0373 qsort(samples, sample_count, sizeof(samples[0]), cmp);
0374 return samples[sample_count / 2];
0375 }
0376
/*
 * Report the sorted samples as run-length encoded M:S lines.  Consecutive
 * samples whose second/nanosecond representation is identical are merged
 * into one line with a count, so the report stays compact even for many
 * samples.  Requires sample_count >= 1 and a sorted sample buffer.
 */
static void
report_sorted_samples(const T_measure_runtime_context *ctx)
{
	size_t sample_count;
	const T_ticks *samples;
	T_time_string ts;
	T_ticks last;
	T_ticks v;
	size_t count;
	size_t i;

	sample_count = ctx->sample_count;
	samples = ctx->samples;
	last = samples[0];
	v = samples[0];
	count = 1;

	for (i = 1; i < sample_count; ++i) {
		v = samples[i];

		if (v != last) {
			uint32_t sa;
			uint32_t sb;
			uint32_t nsa;
			uint32_t nsb;
			T_time t;

			/*
			 * Ticks differ; merge anyway if they map to the same
			 * second/nanosecond pair.
			 */
			t = T_ticks_to_time(last);
			T_time_to_seconds_and_nanoseconds(t, &sa, &nsa);
			T_time_to_seconds_and_nanoseconds(T_ticks_to_time(v),
			    &sb, &nsb);

			if (sa != sb || nsa != nsb) {
				T_printf("M:S:%zu:%s\n", count,
				    T_time_to_string_ns(t, ts));
				count = 1;
			} else {
				++count;
			}

			last = v;
		} else {
			++count;
		}
	}

	/* Flush the final group (count is always >= 1 here). */
	if (count > 0) {
		T_printf("M:S:%zu:%s\n", count,
		    T_ticks_to_string_ns(v, ts));
	}
}
0428
/*
 * Finish a measurement variant: sort the samples and emit the statistics
 * report lines (sample count, optional raw samples, minimum, percentiles,
 * quartiles, maximum, median absolute deviation, accumulated sample time,
 * and overall variant duration).  The exact M:* line format is the report
 * protocol consumed by external tooling.
 */
static void
measure_variant_end(const T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req, T_time begin)
{
	size_t sample_count;
	T_ticks *samples;
	T_time_string ts;
	T_time d;
	T_ticks v;
	T_time a;

	sample_count = ctx->sample_count;
	samples = ctx->samples;
	/* Wall-clock duration of the whole variant. */
	d = T_now() - begin;
	/* Accumulate before qsort() reorders the buffer. */
	a = accumulate(samples, sample_count);
	qsort(samples, sample_count, sizeof(samples[0]), cmp);
	T_printf("M:N:%zu\n", sample_count);

	if ((req->flags & T_MEASURE_RUNTIME_REPORT_SAMPLES) != 0) {
		report_sorted_samples(ctx);
	}

	/* Minimum, 1st percentile, quartiles, 99th percentile, maximum. */
	v = samples[0];
	T_printf("M:MI:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(1 * sample_count) / 100];
	T_printf("M:P1:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(1 * sample_count) / 4];
	T_printf("M:Q1:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[sample_count / 2];
	T_printf("M:Q2:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(3 * sample_count) / 4];
	T_printf("M:Q3:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(99 * sample_count) / 100];
	T_printf("M:P99:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[sample_count - 1];
	T_printf("M:MX:%s\n", T_ticks_to_string_ns(v, ts));
	/*
	 * The MAD computation destroys the sample buffer, so it must come
	 * after all percentile reads above.
	 */
	v = median_absolute_deviation(samples, sample_count);
	T_printf("M:MAD:%s\n", T_ticks_to_string_ns(v, ts));
	T_printf("M:D:%s\n", T_time_to_string_ns(a, ts));
	T_printf("M:E:%s:D:%s\n", req->name, T_time_to_string_ns(d, ts));
}
0470
/*
 * Read one word per cache line of the chunk so that the data cache is
 * filled with the chunk's (unrelated) data.  The volatile qualifier keeps
 * the otherwise result-free reads from being optimized away.
 */
static void
fill_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size)
{
	size_t word_count;
	size_t step;
	size_t i;

	word_count = chunk_size / sizeof(chunk[0]);
	step = cache_line_size / sizeof(chunk[0]);

	for (i = 0; i < word_count; i += step) {
		(void)chunk[i];
	}
}
0486
/*
 * Invoke the measured body with the stack pointer moved by 1 KiB, so the
 * body runs on a fresh (cold) stack area.  The obfuscation keeps the
 * compiler from eliding the alloca().
 */
static void
dirty_call(void (*body)(void *), void *arg)
{
	void *stack_dirt;

	stack_dirt = alloca(1024);
	RTEMS_OBFUSCATE_VARIABLE(stack_dirt);

	(*body)(arg);
}
0498
0499 static void
0500 setup(const T_measure_runtime_request *req, void *arg)
0501 {
0502 if (req->setup != NULL) {
0503 (*req->setup)(arg);
0504 }
0505 }
0506
0507 static bool
0508 teardown(const T_measure_runtime_request *req, void *arg, T_ticks *delta,
0509 uint32_t tic, uint32_t toc, unsigned int retry,
0510 unsigned int maximum_retries)
0511 {
0512 if (req->teardown == NULL) {
0513 return tic == toc || retry >= maximum_retries;
0514 }
0515
0516 return (*req->teardown)(arg, delta, tic, toc, retry);
0517 }
0518
0519 static unsigned int
0520 get_maximum_retries(const T_measure_runtime_request *req)
0521 {
0522 return (req->flags & T_MEASURE_RUNTIME_ALLOW_CLOCK_ISR) != 0 ? 1 : 0;
0523 }
0524
/*
 * "FullCache" variant: before each sample the data cache is filled with
 * unrelated data, so the body runs with a full but useless cache.  Each
 * sample is retried while teardown() rejects it (e.g. a clock tick occurred
 * during the measurement).
 */
static void
measure_full_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;

	measure_variant_begin(req->name, "FullCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			setup(req, arg);
			fill_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size);

			/* tic/toc bracket detects clock tick interference. */
			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
0578
/*
 * "HotCache" variant: each sample runs the body twice; the first run warms
 * the caches and its measurement is discarded, the second run is recorded.
 * Samples are retried while teardown() rejects them.
 */
static void
measure_hot_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;

	measure_variant_begin(req->name, "HotCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			/* Warm-up run; its sample is overwritten below. */
			setup(req, arg);

			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			/* Teardown is still invoked for its side effects. */
			(void)teardown(req, arg, &samples[i], tic, toc, retry,
			    0);
			setup(req, arg);

			/* Measured run with warm caches. */
			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
0641
/*
 * "DirtyCache" variant: before each sample the data cache is dirtied with
 * unrelated writes and the instruction cache is invalidated, then the body
 * is called through dirty_call() so it also runs on a fresh stack area.
 * Samples are retried while teardown() rejects them.
 */
static void
measure_dirty_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;
	size_t token;

	measure_variant_begin(req->name, "DirtyCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;
	/* The token threads a data dependency through all dirty passes. */
	token = 0;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			setup(req, arg);
			token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size, token);
			rtems_cache_invalidate_entire_instruction();

			/* tic/toc bracket detects clock tick interference. */
			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			dirty_call(body, arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
0698
#ifdef __sparc__
/*
 * Measure one dirty call to the body at a controlled recursion depth.  On
 * SPARC the recursion presumably fills the register windows before the
 * measurement so that window overflow/underflow traps are included in the
 * measured time — TODO confirm against the SPARC port documentation.  The
 * obfuscations keep the compiler from flattening the recursion or caching
 * the result.
 */
static T_ticks
recursive_load_call(void (*body)(void *), void *arg, int n)
{
	T_ticks delta;

	RTEMS_OBFUSCATE_VARIABLE(n);

	if (n > 0) {
		delta = recursive_load_call(body, arg, n - 1);
	} else {
		T_ticks t0;
		T_ticks t1;

		t0 = T_tick();
		dirty_call(body, arg);
		t1 = T_tick();

		delta = t1 - t0;
	}

	RTEMS_OBFUSCATE_VARIABLE(delta);
	return delta;
}
#else
/*
 * Measure one dirty call to the body (non-SPARC variant).
 */
static T_ticks
load_call(void (*body)(void *), void *arg)
{
	T_ticks t0;
	T_ticks t1;

	t0 = T_tick();
	dirty_call(body, arg);
	t1 = T_tick();

	return t1 - t0;
}
#endif
0742
/*
 * "Load/N" variant: measure the body while N = load + 1 processors run
 * load worker tasks that continuously dirty their data caches.  The given
 * worker is restarted (the workers of lower loads are expected to be
 * running already); before each sample the runner additionally dirties its
 * own data cache and invalidates the instruction cache.
 */
static void
measure_load_variant(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req,
    const load_context *lctx, uint32_t load)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;
	size_t token;

	/* Begin/variant report lines (same protocol as the other variants). */
	T_printf("M:B:%s\n", req->name);
	T_printf("M:V:Load/%" PRIu32 "\n", load + 1);
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;
	token = 0;

	restart_worker(lctx);

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks delta;

			setup(req, arg);
			token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size, token);
			rtems_cache_invalidate_entire_instruction();

			tic = rtems_clock_get_ticks_since_boot();
#ifdef __sparc__
			/* Depth fills the register windows; see above. */
			delta = recursive_load_call(body, arg,
			    SPARC_NUMBER_OF_REGISTER_WINDOWS - 3);
#else
			delta = load_call(body, arg);
#endif
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = delta;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
0805
/*
 * Run the load variants.  On SMP, load levels 1 .. load_count - 1 are
 * measured (or their workers merely restarted when minor loads are
 * disabled, so the load still builds up); the maximum load uses all
 * processors.  Workers stay running across variants and are suspended
 * again at the end.
 */
static void
measure_load(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	const load_context *lctx;
	uint32_t load;

#ifdef RTEMS_SMP
	/* Minor loads: one additional worker per iteration. */
	for (load = 0; load < ctx->load_count - 1; ++load) {
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			if ((req->flags &
			    T_MEASURE_RUNTIME_DISABLE_MINOR_LOAD) == 0) {
				measure_load_variant(ctx, req, lctx, load);
			} else {
				/* Keep the load building up anyway. */
				restart_worker(lctx);
			}
		}
	}
#endif

	/* Maximum load: the last worker joins in. */
	if ((req->flags & T_MEASURE_RUNTIME_DISABLE_MAX_LOAD) == 0) {
		load = ctx->load_count - 1;
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			measure_load_variant(ctx, req, lctx, load);
		}
	}

	/* Park all workers again. */
	for (load = 0; load < ctx->load_count; ++load) {
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			suspend_worker(lctx);
		}
	}
}
0845
0846 void
0847 T_measure_runtime(T_measure_runtime_context *ctx,
0848 const T_measure_runtime_request *req)
0849 {
0850
0851
0852
0853
0854 if ((req->flags & T_MEASURE_RUNTIME_DISABLE_FULL_CACHE) == 0) {
0855 measure_full_cache(ctx, req);
0856 }
0857
0858 if ((req->flags & T_MEASURE_RUNTIME_DISABLE_HOT_CACHE) == 0) {
0859 measure_hot_cache(ctx, req);
0860 }
0861
0862 if ((req->flags & T_MEASURE_RUNTIME_DISABLE_DIRTY_CACHE) == 0) {
0863 measure_dirty_cache(ctx, req);
0864 }
0865
0866 measure_load(ctx, req);
0867 }