#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/atomic.h>
#include <rtems/score/smpbarrier.h>
#include <rtems.h>
#include <rtems/bsd.h>
#include <rtems/test-info.h>
#include <limits.h>
#include <string.h>

#include "tmacros.h"

const char rtems_test_name[] = "SMPATOMIC 1";

#define MS_PER_TICK 10

#define MASTER_PRIORITY 1

#define WORKER_PRIORITY 2

#define CPU_COUNT 32

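/*
 * Shared context of all test jobs.  The base member must be first, since the
 * job handlers cast the rtems_test_parallel_context pointer to this type.
 * The padding between normal_value and second_value places the two counters
 * on different cache lines for the fence and seqlock test cases.
 */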
typedef struct {
  rtems_test_parallel_context base;
  Atomic_Ulong atomic_value;
  unsigned long per_worker_value[CPU_COUNT];
  unsigned long normal_value;
  char unused_space_for_cache_line_separation[128];
  unsigned long second_value;
  Atomic_Flag global_flag;
  SMP_barrier_Control barrier;
  SMP_barrier_State barrier_state[CPU_COUNT];
  sbintime_t load_trigger_time;
  sbintime_t load_change_time[CPU_COUNT];
  int load_count[CPU_COUNT];
  sbintime_t rmw_trigger_time;
  sbintime_t rmw_change_time[CPU_COUNT];
  int rmw_count[CPU_COUNT];
} smpatomic01_context;

static smpatomic01_context test_instance;

static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}

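/*
 * Prints the per-worker counts of a test case and checks that the observed
 * global value equals their sum.
 */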
static void test_fini(
  smpatomic01_context *ctx,
  const char *test,
  bool atomic
)
{
  unsigned long expected_value = 0;
  unsigned long actual_value;
  size_t worker_index;

  printf("=== atomic %s test case ===\n", test);

  for (
    worker_index = 0;
    worker_index < ctx->base.worker_count;
    ++worker_index
  ) {
    unsigned long worker_value = ctx->per_worker_value[worker_index];

    expected_value += worker_value;

    printf(
      "worker %zu value: %lu\n",
      worker_index,
      worker_value
    );
  }

  if (atomic) {
    actual_value = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
  } else {
    actual_value = ctx->normal_value;
  }

  printf(
    "atomic value: expected = %lu, actual = %lu\n",
    expected_value,
    actual_value
  );

  rtems_test_assert(expected_value == actual_value);
}

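/*
 * Atomic add test case: each worker counts how many relaxed fetch-and-add
 * operations it performed on the shared value.
 */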
static rtems_interval test_atomic_add_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_add_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    ++counter;
    _Atomic_Fetch_add_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_add_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "add", true);
}

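/*
 * Atomic flag test case: the flag is used as a test-and-set lock protecting
 * the non-atomic normal_value counter.
 */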
static rtems_interval test_atomic_flag_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  ctx->normal_value = 0;

  return test_duration();
}

static void test_atomic_flag_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    while (_Atomic_Flag_test_and_set(&ctx->global_flag, ATOMIC_ORDER_ACQUIRE)) {
      /* Busy wait until the flag is clear */
    }

    ++counter;
    ++ctx->normal_value;

    _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_flag_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "flag", false);
}

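/*
 * Atomic subtract test case: the per-worker counter and the shared value
 * wrap around in the same way, so the sums still match in test_fini().
 */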
static rtems_interval test_atomic_sub_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_sub_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    --counter;
    _Atomic_Fetch_sub_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_sub_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "sub", true);
}

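/*
 * Compare-exchange test case: the atomic value implements a lock; a worker
 * enters the critical section by exchanging 0 for 1 and leaves it with a
 * release store of 0.
 */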
static rtems_interval test_atomic_compare_exchange_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);
  ctx->normal_value = 0;

  return test_duration();
}

static void test_atomic_compare_exchange_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    bool success;

    do {
      unsigned long zero = 0;

      success = _Atomic_Compare_exchange_ulong(
        &ctx->atomic_value,
        &zero,
        1,
        ATOMIC_ORDER_ACQUIRE,
        ATOMIC_ORDER_RELAXED
      );
    } while (!success);

    ++counter;
    ++ctx->normal_value;

    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_compare_exchange_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "compare exchange", false);
}

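/*
 * Bitwise or/and test case: each worker toggles its own bit and checks that
 * the previously observed state of the bit always differs from the state it
 * has just set.
 */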
static rtems_interval test_atomic_or_and_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_or_and_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long the_bit = 1UL << worker_index;
  unsigned long current_bit = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long previous;

    if (current_bit != 0) {
      previous = _Atomic_Fetch_and_ulong(
        &ctx->atomic_value,
        ~the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = 0;
    } else {
      previous = _Atomic_Fetch_or_ulong(
        &ctx->atomic_value,
        the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = the_bit;
    }

    rtems_test_assert((previous & the_bit) != current_bit);
  }

  ctx->per_worker_value[worker_index] = current_bit;
}

static void test_atomic_or_and_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "or/and", true);
}

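/*
 * Fence test case: the master writes normal_value before second_value with a
 * release fence in between; the readers load them in reverse order with an
 * acquire fence in between, so an observed second value may never be ahead
 * of the normal value.
 */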
static rtems_interval test_atomic_fence_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Fence(ATOMIC_ORDER_RELEASE);

  return test_duration();
}

static void test_atomic_fence_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    unsigned long counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      ++counter;
      ctx->normal_value = counter;
      _Atomic_Fence(ATOMIC_ORDER_RELEASE);
      ctx->second_value = counter;
    }
  } else {
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long n;
      unsigned long s;

      s = ctx->second_value;
      _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
      n = ctx->normal_value;

      rtems_test_assert(n - s < LONG_MAX);
    }
  }
}

static void test_atomic_fence_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  printf(
    "=== atomic fence test case ===\n"
    "normal value = %lu, second value = %lu\n",
    ctx->normal_value,
    ctx->second_value
  );
}

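/*
 * Store/load/read-modify-write test case: measures how long it takes until a
 * store by processor 0 becomes visible to the other processors, once via
 * load acquire and once via an atomic read-modify-write operation.
 */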
static rtems_interval test_atomic_store_load_rmw_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  _SMP_barrier_Control_initialize(&ctx->barrier);

  for (i = 0; i < active_workers; ++i) {
    _SMP_barrier_State_initialize(&ctx->barrier_state[i]);
  }

  return 0;
}

static sbintime_t now(void)
{
  struct bintime bt;

  rtems_bsd_binuptime(&bt);
  return bttosbt(bt);
}

static void test_atomic_store_load_rmw_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  sbintime_t t;
  int counter;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    rtems_status_code sc;

    sc = rtems_task_wake_after(1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    t = now();
    t += (MS_PER_TICK / 2) * SBT_1MS;
    ctx->load_trigger_time = t;
    t += MS_PER_TICK * SBT_1MS;
    ctx->rmw_trigger_time = t;
  }

  _Atomic_Fence(ATOMIC_ORDER_SEQ_CST);

  _SMP_barrier_Wait(
    &ctx->barrier,
    &ctx->barrier_state[worker_index],
    active_workers
  );

  /*
   * Use the current processor index to record the results, so that timing
   * differences introduced by the system topology can be observed.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  /* Store release and load acquire test case */

  counter = 0;
  t = ctx->load_trigger_time;

  while (now() < t) {
    /* Wait for the trigger time */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELEASE);
  } else {
    while (_Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE) == 0) {
      ++counter;
    }
  }

  ctx->load_change_time[cpu_self_index] = now();
  ctx->load_count[cpu_self_index] = counter;

  /* Read-modify-write test case */

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELAXED);
  }

  counter = 0;
  t = ctx->rmw_trigger_time;

  while (now() < t) {
    /* Wait for the trigger time */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  } else {
    while (
      (_Atomic_Fetch_or_ulong(&ctx->atomic_value, 2, ATOMIC_ORDER_RELAXED) & 1)
        == 0
    ) {
      ++counter;
    }
  }

  ctx->rmw_change_time[cpu_self_index] = now();
  ctx->rmw_count[cpu_self_index] = counter;
}

static void test_atomic_store_load_rmw_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;
  struct bintime bt;
  struct timespec ts;

  printf("=== atomic store release and load acquire test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->load_change_time[i] - ctx->load_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, load count %i\n",
      i,
      ts.tv_nsec,
      ctx->load_count[i]
    );
  }

  printf("=== atomic read-modify-write test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->rmw_change_time[i] - ctx->rmw_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, read-modify-write count %i\n",
      i,
      ts.tv_nsec,
      ctx->rmw_count[i]
    );
  }
}
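/*
 * Sequence lock test cases.  A writer makes the sequence number odd before
 * it updates the protected values and even again afterwards.  Readers retry
 * until they observe the same even sequence number before and after the
 * reads.
 */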
static rtems_interval test_seqlock_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);

  return test_duration();
}

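/*
 * Reads both protected values and checks that they are consistent.  The
 * fetch-add of zero re-reads the sequence number and orders it after the
 * data loads.
 */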
static unsigned long seqlock_read(smpatomic01_context *ctx)
{
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long seq0;
    unsigned long seq1;
    unsigned long a;
    unsigned long b;

    do {
      seq0 = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE);

      a = ctx->normal_value;
      b = ctx->second_value;

      seq1 =
        _Atomic_Fetch_add_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
    } while (seq0 != seq1 || seq0 % 2 != 0);

    ++counter;
    rtems_test_assert(a == b);
  }

  return counter;
}

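/*
 * Single writer seqlock: processor 0 is the only writer and can therefore
 * update the sequence number with plain stores; all other processors read.
 */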
static void test_single_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the current processor index to select the writer and to record the
   * per-processor result.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  if (cpu_self_index == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

      seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      _Atomic_Store_ulong(&ctx->atomic_value, seq + 1, ATOMIC_ORDER_RELAXED);

      /* Make the odd sequence number visible before the data is changed */
      _Atomic_Fence(ATOMIC_ORDER_ACQ_REL);

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_single_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== single writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}

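/*
 * Multi writer seqlock: processors with an even index are writers and must
 * acquire the sequence lock via compare-and-swap; processors with an odd
 * index read.
 */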
static void test_multi_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the current processor index to select the role and to record the
   * per-processor result.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  if (cpu_self_index % 2 == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

      do {
        seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      } while (
        seq % 2 != 0
          || !_Atomic_Compare_exchange_ulong(
            &ctx->atomic_value,
            &seq,
            seq + 1,
            ATOMIC_ORDER_ACQ_REL,
            ATOMIC_ORDER_RELAXED
          )
      );

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_multi_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== multi writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}

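/* The test jobs are executed in this order by rtems_test_parallel() */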
static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_atomic_add_init,
    .body = test_atomic_add_body,
    .fini = test_atomic_add_fini
  }, {
    .init = test_atomic_flag_init,
    .body = test_atomic_flag_body,
    .fini = test_atomic_flag_fini
  }, {
    .init = test_atomic_sub_init,
    .body = test_atomic_sub_body,
    .fini = test_atomic_sub_fini
  }, {
    .init = test_atomic_compare_exchange_init,
    .body = test_atomic_compare_exchange_body,
    .fini = test_atomic_compare_exchange_fini
  }, {
    .init = test_atomic_or_and_init,
    .body = test_atomic_or_and_body,
    .fini = test_atomic_or_and_fini
  }, {
    .init = test_atomic_fence_init,
    .body = test_atomic_fence_body,
    .fini = test_atomic_fence_fini
  }, {
    .init = test_atomic_store_load_rmw_init,
    .body = test_atomic_store_load_rmw_body,
    .fini = test_atomic_store_load_rmw_fini
  }, {
    .init = test_seqlock_init,
    .body = test_single_writer_seqlock_body,
    .fini = test_single_writer_seqlock_fini
  }, {
    .init = test_seqlock_init,
    .body = test_multi_writer_seqlock_body,
    .fini = test_multi_writer_seqlock_fini
  }
};

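/* Run the workers below the master task priority */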
static void setup_worker(
  rtems_test_parallel_context *base,
  size_t worker_index,
  rtems_id worker_id
)
{
  rtems_status_code sc;
  rtems_task_priority prio;

  sc = rtems_task_set_priority(worker_id, WORKER_PRIORITY, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

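/* Runs all test jobs via the parallel test framework */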
static void Init(rtems_task_argument arg)
{
  smpatomic01_context *ctx = &test_instance;

  TEST_BEGIN();

  rtems_test_parallel(
    &ctx->base,
    setup_worker,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );

  TEST_END();
  rtems_test_exit(0);
}

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

#define CONFIGURE_MICROSECONDS_PER_TICK (MS_PER_TICK * 1000)

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_INIT_TASK_PRIORITY MASTER_PRIORITY
#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>