#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/recordclient.h>

#include <stdlib.h>
#include <string.h>

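/*
 * Mask used to reduce timestamp differences modulo the RTEMS_RECORD_TIME_BITS
 * wide time value, so that wraparound of the truncated item timestamps is
 * handled.
 */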
#define TIME_MASK ( ( UINT32_C( 1 ) << RTEMS_RECORD_TIME_BITS ) - 1 )

static rtems_record_client_status visit(
  rtems_record_client_context *ctx,
  uint32_t time_event,
  uint64_t data
);

static rtems_record_client_status consume_error(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  (void) buf;
  (void) n;

  return ctx->status;
}

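/*
 * Latch the error status in the context and switch to a consumer which simply
 * returns this status for all further input.
 */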
static rtems_record_client_status error(
  rtems_record_client_context *ctx,
  rtems_record_client_status status
)
{
  ctx->status = status;
  ctx->consume = consume_error;

  return status;
}

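/*
 * Handle the RTEMS_RECORD_PER_CPU_COUNT event: remember the per-CPU ring
 * buffer item count and allocate a hold back buffer for each processor.
 */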
static rtems_record_client_status process_per_cpu_count(
  rtems_record_client_context *ctx,
  uint64_t data
)
{
  size_t item_capacity;
  uint32_t cpu;

  if ( ctx->per_cpu_items != 0 ) {
    return error( ctx, RTEMS_RECORD_CLIENT_ERROR_DOUBLE_PER_CPU_COUNT );
  }

  if ( ctx->cpu_count == 0 ) {
    return error( ctx, RTEMS_RECORD_CLIENT_ERROR_NO_CPU_MAX );
  }

  ctx->per_cpu_items = (uint32_t) data;

  /* Provide room for twice the per-CPU item count in each hold back buffer */
  item_capacity = 2 * ctx->per_cpu_items;

  for ( cpu = 0; cpu < ctx->cpu_count; ++cpu ) {
    rtems_record_client_per_cpu *per_cpu;

    per_cpu = &ctx->per_cpu[ cpu ];
    per_cpu->items = realloc(
      per_cpu->items,
      item_capacity * sizeof( *per_cpu->items )
    );

    if ( per_cpu->items == NULL ) {
      return error( ctx, RTEMS_RECORD_CLIENT_ERROR_NO_MEMORY );
    }

    per_cpu->item_capacity = item_capacity;
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

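/*
 * Compute the scaler used to convert accumulated timer ticks at the given
 * frequency into a binary time offset; the scaler carries 31 fractional bits,
 * see time_bt().
 */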
static void set_to_bt_scaler(
  rtems_record_client_context *ctx,
  uint32_t frequency
)
{
  uint64_t bin_per_s;

  bin_per_s = UINT64_C( 1 ) << 32;
  ctx->to_bt_scaler = ( ( bin_per_s << 31 ) + frequency / 2 ) / frequency;
}

static bool has_time( rtems_record_event event )
{
  return event > RTEMS_RECORD_NO_TIME_LAST;
}

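/*
 * Convert the truncated item timestamp into a monotonic binary time value
 * relative to the last uptime reference of the processor.  If the new value
 * would move backwards, the previous value is returned instead and the
 * difference is reported to the handler through a RTEMS_RECORD_TIME_ADJUSTMENT
 * event.
 */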
static uint64_t time_bt(
  const rtems_record_client_context *ctx,
  rtems_record_client_per_cpu *per_cpu,
  uint32_t time,
  rtems_record_event event
)
{
  uint64_t time_accumulated;
  uint64_t last_bt;
  uint64_t bt;

  time_accumulated = per_cpu->uptime.time_accumulated;

  if ( has_time( event ) ) {
    time_accumulated += ( time - per_cpu->uptime.time_last ) & TIME_MASK;
    per_cpu->uptime.time_last = time;
    per_cpu->uptime.time_accumulated = time_accumulated;
  }

  last_bt = per_cpu->last_bt;
  bt = per_cpu->uptime.uptime_bt;
  bt += ( time_accumulated * ctx->to_bt_scaler ) >> 31;

  if ( bt >= last_bt ) {
    per_cpu->last_bt = bt;

    return bt;
  }

  (void) ( *ctx->handler )(
    last_bt,
    ctx->cpu,
    RTEMS_RECORD_TIME_ADJUSTMENT,
    last_bt - bt,
    ctx->handler_arg
  );

  return last_bt;
}

static rtems_record_client_status call_handler(
  const rtems_record_client_context *ctx,
  rtems_record_client_per_cpu *per_cpu,
  uint32_t time,
  rtems_record_event event,
  uint64_t data
)
{
  return ( *ctx->handler )(
    time_bt( ctx, per_cpu, time, event ),
    ctx->cpu,
    event,
    data,
    ctx->handler_arg
  );
}

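/*
 * Replay the held back items of a processor.  The uptime of the oldest held
 * back item is reconstructed by walking the items backwards from the current
 * uptime reference, then the items are delivered to the handler in order with
 * consistent timestamps.
 */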
static rtems_record_client_status resolve_hold_back(
  rtems_record_client_context *ctx,
  rtems_record_client_per_cpu *per_cpu
)
{
  rtems_record_item_64 *items;
  uint32_t last;
  uint64_t accumulated;
  size_t index;
  rtems_record_client_uptime uptime;

  items = per_cpu->items;
  last = per_cpu->uptime.time_last;
  accumulated = 0;

  for ( index = per_cpu->item_index; index > 0; --index ) {
    uint32_t time_event;

    time_event = items[ index - 1 ].event;

    if ( has_time( RTEMS_RECORD_GET_EVENT( time_event ) ) ) {
      uint32_t time;

      time = RTEMS_RECORD_GET_TIME( time_event );
      accumulated += ( last - time ) & TIME_MASK;
      last = time;
    }
  }

  uptime = per_cpu->uptime;
  per_cpu->uptime.uptime_bt -= ( accumulated * ctx->to_bt_scaler ) >> 31;
  per_cpu->uptime.time_last = last;
  per_cpu->uptime.time_accumulated = 0;

  for ( index = 0; index < per_cpu->item_index; ++index ) {
    uint32_t time_event;
    rtems_record_client_status status;

    time_event = items[ index ].event;
    status = call_handler(
      ctx,
      per_cpu,
      RTEMS_RECORD_GET_TIME( time_event ),
      RTEMS_RECORD_GET_EVENT( time_event ),
      items[ index ].data
    );

    if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
      return status;
    }
  }

  per_cpu->uptime = uptime;
  per_cpu->hold_back = false;
  per_cpu->item_index = 0;

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

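/*
 * Store an item in the hold back buffer of the processor.  The buffer is
 * grown on demand up to RTEMS_RECORD_CLIENT_HOLD_BACK_REALLOCATION_LIMIT
 * items.
 */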
static rtems_record_client_status hold_back(
  rtems_record_client_context *ctx,
  rtems_record_client_per_cpu *per_cpu,
  uint32_t time_event,
  uint64_t data
)
{
  uint32_t item_index;

  item_index = per_cpu->item_index;

  if ( item_index >= per_cpu->item_capacity ) {
    if ( item_index >= RTEMS_RECORD_CLIENT_HOLD_BACK_REALLOCATION_LIMIT ) {
      return error( ctx, RTEMS_RECORD_CLIENT_ERROR_PER_CPU_ITEMS_OVERFLOW );
    }

    per_cpu->item_capacity = 2 * item_index;
    per_cpu->items = realloc(
      per_cpu->items,
      per_cpu->item_capacity * sizeof( *per_cpu->items )
    );

    if ( per_cpu->items == NULL ) {
      return error( ctx, RTEMS_RECORD_CLIENT_ERROR_NO_MEMORY );
    }
  }

  per_cpu->items[ item_index ].event = time_event;
  per_cpu->items[ item_index ].data = data;
  per_cpu->item_index = item_index + 1;

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

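/*
 * Process one decoded item.  Control events update the client state (current
 * processor, uptime reference, processor count, per-CPU item count, timer
 * frequency, format version).  While no reliable uptime reference is
 * available for the current processor, items are held back, otherwise they
 * are passed to the handler.
 */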
static rtems_record_client_status visit(
  rtems_record_client_context *ctx,
  uint32_t time_event,
  uint64_t data
)
{
  rtems_record_client_per_cpu *per_cpu;
  uint32_t time;
  rtems_record_event event;
  rtems_record_client_status status;
  bool do_hold_back;

  per_cpu = &ctx->per_cpu[ ctx->cpu ];
  time = RTEMS_RECORD_GET_TIME( time_event );
  event = RTEMS_RECORD_GET_EVENT( time_event );
  do_hold_back = per_cpu->hold_back;

  switch ( event ) {
    case RTEMS_RECORD_PROCESSOR:
      if ( data >= ctx->cpu_count ) {
        return error( ctx, RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU );
      }

      ctx->cpu = (uint32_t) data;
      per_cpu = &ctx->per_cpu[ ctx->cpu ];
      break;
    case RTEMS_RECORD_UPTIME_LOW:
      per_cpu->uptime_low = (uint32_t) data;
      per_cpu->uptime_low_valid = true;
      break;
    case RTEMS_RECORD_UPTIME_HIGH:
      if ( per_cpu->uptime_low_valid ) {
        per_cpu->uptime_low_valid = false;
        per_cpu->uptime.uptime_bt = ( data << 32 ) | per_cpu->uptime_low;
        per_cpu->uptime.time_last = time;
        per_cpu->uptime.time_accumulated = 0;

        if ( do_hold_back ) {
          status = resolve_hold_back( ctx, per_cpu );

          if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
            return status;
          }
        }

        do_hold_back = false;
      }

      break;
    case RTEMS_RECORD_PROCESSOR_MAXIMUM:
      if ( data >= RTEMS_RECORD_CLIENT_MAXIMUM_CPU_COUNT ) {
        return error( ctx, RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU_MAX );
      }

      if ( ctx->cpu_count != 0 ) {
        return error( ctx, RTEMS_RECORD_CLIENT_ERROR_DOUBLE_CPU_MAX );
      }

      ctx->cpu_count = (uint32_t) data + 1;
      do_hold_back = false;
      break;
    case RTEMS_RECORD_PER_CPU_COUNT:
      status = process_per_cpu_count( ctx, data );

      if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
        return status;
      }

      break;
    case RTEMS_RECORD_PER_CPU_OVERFLOW:
      do_hold_back = true;
      per_cpu->hold_back = true;
      break;
    case RTEMS_RECORD_FREQUENCY:
      set_to_bt_scaler( ctx, (uint32_t) data );
      break;
    case RTEMS_RECORD_VERSION:
      if ( data != RTEMS_RECORD_THE_VERSION ) {
        return error( ctx, RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_VERSION );
      }

      do_hold_back = false;
      break;
    default:
      break;
  }

  if ( do_hold_back ) {
    return hold_back( ctx, per_cpu, time_event, data );
  }

  return call_handler( ctx, per_cpu, time, event, data );
}

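/*
 * Reassemble 32-bit format items from an arbitrarily fragmented byte stream
 * and feed each complete item to visit().  The consume_64(), consume_swap_32()
 * and consume_swap_64() variants below do the same for the other stream
 * formats and byte swap the item fields where the stream endianness differs
 * from the host.
 */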
static rtems_record_client_status consume_32(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  while ( n > 0 ) {
    size_t m;
    char *pos;

    m = ctx->todo < n ? ctx->todo : n;
    pos = ctx->pos;
    pos = memcpy( pos, buf, m );
    n -= m;
    buf = (char *) buf + m;

    if ( m == ctx->todo ) {
      rtems_record_client_status status;

      ctx->todo = sizeof( ctx->item.format_32 );
      ctx->pos = &ctx->item.format_32;

      status = visit(
        ctx,
        ctx->item.format_32.event,
        ctx->item.format_32.data
      );

      if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
        return status;
      }
    } else {
      ctx->todo -= m;
      ctx->pos = pos + m;
    }
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

static rtems_record_client_status consume_64(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  while ( n > 0 ) {
    size_t m;
    char *pos;

    m = ctx->todo < n ? ctx->todo : n;
    pos = ctx->pos;
    pos = memcpy( pos, buf, m );
    n -= m;
    buf = (char *) buf + m;

    if ( m == ctx->todo ) {
      rtems_record_client_status status;

      ctx->todo = sizeof( ctx->item.format_64 );
      ctx->pos = &ctx->item.format_64;

      status = visit(
        ctx,
        ctx->item.format_64.event,
        ctx->item.format_64.data
      );

      if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
        return status;
      }
    } else {
      ctx->todo -= m;
      ctx->pos = pos + m;
    }
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

static rtems_record_client_status consume_swap_32(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  while ( n > 0 ) {
    size_t m;
    char *pos;

    m = ctx->todo < n ? ctx->todo : n;
    pos = ctx->pos;
    pos = memcpy( pos, buf, m );
    n -= m;
    buf = (char *) buf + m;

    if ( m == ctx->todo ) {
      rtems_record_client_status status;

      ctx->todo = sizeof( ctx->item.format_32 );
      ctx->pos = &ctx->item.format_32;

      status = visit(
        ctx,
        __builtin_bswap32( ctx->item.format_32.event ),
        __builtin_bswap32( ctx->item.format_32.data )
      );

      if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
        return status;
      }
    } else {
      ctx->todo -= m;
      ctx->pos = pos + m;
    }
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

static rtems_record_client_status consume_swap_64(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  while ( n > 0 ) {
    size_t m;
    char *pos;

    m = ctx->todo < n ? ctx->todo : n;
    pos = ctx->pos;
    pos = memcpy( pos, buf, m );
    n -= m;
    buf = (char *) buf + m;

    if ( m == ctx->todo ) {
      rtems_record_client_status status;

      ctx->todo = sizeof( ctx->item.format_64 );
      ctx->pos = &ctx->item.format_64;

      status = visit(
        ctx,
        __builtin_bswap32( ctx->item.format_64.event ),
        __builtin_bswap64( ctx->item.format_64.data )
      );

      if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
        return status;
      }
    } else {
      ctx->todo -= m;
      ctx->pos = pos + m;
    }
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

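/*
 * Consume the stream header: determine the item format and endianness, select
 * the matching consumer and item size, check the magic value, and hand the
 * remaining bytes to the selected consumer via rtems_record_client_run().
 */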
static rtems_record_client_status consume_init(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  while ( n > 0 ) {
    size_t m;
    char *pos;

    m = ctx->todo < n ? ctx->todo : n;
    pos = ctx->pos;
    pos = memcpy( pos, buf, m );
    n -= m;
    buf = (char *) buf + m;

    if ( m == ctx->todo ) {
      uint32_t magic;

      magic = ctx->header[ 1 ];

      switch ( ctx->header[ 0 ] ) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        case RTEMS_RECORD_FORMAT_LE_32:
          ctx->todo = sizeof( ctx->item.format_32 );
          ctx->pos = &ctx->item.format_32;
          ctx->consume = consume_32;
          ctx->data_size = 4;
          break;
        case RTEMS_RECORD_FORMAT_LE_64:
          ctx->todo = sizeof( ctx->item.format_64 );
          ctx->pos = &ctx->item.format_64;
          ctx->consume = consume_64;
          ctx->data_size = 8;
          break;
        case RTEMS_RECORD_FORMAT_BE_32:
          ctx->todo = sizeof( ctx->item.format_32 );
          ctx->pos = &ctx->item.format_32;
          ctx->consume = consume_swap_32;
          ctx->data_size = 4;
          magic = __builtin_bswap32( magic );
          break;
        case RTEMS_RECORD_FORMAT_BE_64:
          ctx->todo = sizeof( ctx->item.format_64 );
          ctx->pos = &ctx->item.format_64;
          ctx->consume = consume_swap_64;
          ctx->data_size = 8;
          magic = __builtin_bswap32( magic );
          break;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        case RTEMS_RECORD_FORMAT_LE_32:
          ctx->todo = sizeof( ctx->item.format_32 );
          ctx->pos = &ctx->item.format_32;
          ctx->consume = consume_swap_32;
          ctx->data_size = 4;
          magic = __builtin_bswap32( magic );
          break;
        case RTEMS_RECORD_FORMAT_LE_64:
          ctx->todo = sizeof( ctx->item.format_64 );
          ctx->pos = &ctx->item.format_64;
          ctx->consume = consume_swap_64;
          ctx->data_size = 8;
          magic = __builtin_bswap32( magic );
          break;
        case RTEMS_RECORD_FORMAT_BE_32:
          ctx->todo = sizeof( ctx->item.format_32 );
          ctx->pos = &ctx->item.format_32;
          ctx->consume = consume_32;
          ctx->data_size = 4;
          break;
        case RTEMS_RECORD_FORMAT_BE_64:
          ctx->todo = sizeof( ctx->item.format_64 );
          ctx->pos = &ctx->item.format_64;
          ctx->consume = consume_64;
          ctx->data_size = 8;
          break;
#else
#error "unexpected __BYTE_ORDER__"
#endif
        default:
          return error( ctx, RTEMS_RECORD_CLIENT_ERROR_UNKNOWN_FORMAT );
      }

      if ( magic != RTEMS_RECORD_MAGIC ) {
        return error( ctx, RTEMS_RECORD_CLIENT_ERROR_INVALID_MAGIC );
      }

      return rtems_record_client_run( ctx, buf, n );
    } else {
      ctx->todo -= m;
      ctx->pos = pos + m;
    }
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

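/*
 * Initialize the client context.  A typical caller initializes the context
 * once, feeds raw stream data to rtems_record_client_run() as it arrives, and
 * finally calls rtems_record_client_destroy().  A minimal usage sketch (the
 * handler, its argument, and the buffer are caller provided and illustrative
 * here):
 *
 *   rtems_record_client_context ctx;
 *
 *   rtems_record_client_init( &ctx, my_handler, my_handler_arg );
 *   rtems_record_client_run( &ctx, buf, n );
 *   rtems_record_client_destroy( &ctx );
 */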
rtems_record_client_status rtems_record_client_init(
  rtems_record_client_context *ctx,
  rtems_record_client_handler handler,
  void *arg
)
{
  uint32_t cpu;

  ctx = memset( ctx, 0, sizeof( *ctx ) );
  ctx->to_bt_scaler = UINT64_C( 1 ) << 31;
  ctx->handler = handler;
  ctx->handler_arg = arg;
  ctx->todo = sizeof( ctx->header );
  ctx->pos = &ctx->header;
  ctx->consume = consume_init;

  for ( cpu = 0; cpu < RTEMS_RECORD_CLIENT_MAXIMUM_CPU_COUNT; ++cpu ) {
    ctx->per_cpu[ cpu ].hold_back = true;
  }

  return RTEMS_RECORD_CLIENT_SUCCESS;
}

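/*
 * Feed a chunk of the raw record stream to the client.  May be called
 * repeatedly with arbitrarily sized fragments; items split across calls are
 * reassembled by the active consumer.
 */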
rtems_record_client_status rtems_record_client_run(
  rtems_record_client_context *ctx,
  const void *buf,
  size_t n
)
{
  return ( *ctx->consume )( ctx, buf, n );
}

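/*
 * Estimate the uptime of a processor whose held back items could not be
 * resolved against a reliable uptime reference, by accumulating the timestamp
 * deltas of the held back items.
 */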
static void calculate_best_effort_uptime(
  rtems_record_client_context *ctx,
  rtems_record_client_per_cpu *per_cpu
)
{
  rtems_record_item_64 *items;
  uint32_t last;
  uint64_t accumulated;
  size_t index;

  items = per_cpu->items;
  accumulated = 0;

  if ( per_cpu->uptime.uptime_bt != 0 ) {
    last = per_cpu->uptime.time_last;
  } else {
    last = RTEMS_RECORD_GET_TIME( items[ 0 ].event );
  }

  for ( index = 0; index < per_cpu->item_index; ++index ) {
    uint32_t time_event;

    time_event = items[ index ].event;

    if ( has_time( RTEMS_RECORD_GET_EVENT( time_event ) ) ) {
      uint32_t time;

      time = RTEMS_RECORD_GET_TIME( time_event );
      accumulated += ( time - last ) & TIME_MASK;
      last = time;
    }
  }

  per_cpu->uptime.uptime_bt += ( accumulated * ctx->to_bt_scaler ) >> 31;
  per_cpu->uptime.time_last = last;
  per_cpu->uptime.time_accumulated = 0;
}

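/*
 * Destroy the client context.  Remaining held back items are flushed with a
 * best effort timestamp, preceded by a RTEMS_RECORD_UNRELIABLE_TIME event,
 * and the per-CPU item buffers are released.
 */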
void rtems_record_client_destroy(
  rtems_record_client_context *ctx
)
{
  uint32_t cpu;

  for ( cpu = 0; cpu < ctx->cpu_count; ++cpu ) {
    rtems_record_client_per_cpu *per_cpu;

    ctx->cpu = cpu;
    per_cpu = &ctx->per_cpu[ cpu ];

    if ( per_cpu->hold_back && per_cpu->item_index > 0 ) {
      (void) call_handler(
        ctx,
        per_cpu,
        0,
        RTEMS_RECORD_UNRELIABLE_TIME,
        0
      );
      calculate_best_effort_uptime( ctx, per_cpu );
      (void) resolve_hold_back( ctx, per_cpu );
    }

    free( per_cpu->items );
  }
}