File indexing completed on 2025-05-11 08:24:13
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040 #ifndef _RTEMS_SCORE_THREADIMPL_H
0041 #define _RTEMS_SCORE_THREADIMPL_H
0042
0043 #include <rtems/score/thread.h>
0044 #include <rtems/score/assert.h>
0045 #include <rtems/score/chainimpl.h>
0046 #include <rtems/score/interr.h>
0047 #include <rtems/score/isr.h>
0048 #include <rtems/score/objectimpl.h>
0049 #include <rtems/score/schedulernodeimpl.h>
0050 #include <rtems/score/statesimpl.h>
0051 #include <rtems/score/status.h>
0052 #include <rtems/score/sysstate.h>
0053 #include <rtems/score/timestampimpl.h>
0054 #include <rtems/score/threadqimpl.h>
0055 #include <rtems/score/todimpl.h>
0056 #include <rtems/score/watchdogimpl.h>
0057 #include <rtems/config.h>
0058
0059 #ifdef __cplusplus
0060 extern "C" {
0061 #endif
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
/**
 * @brief Registry of threads which terminated and wait to be reclaimed by
 *   _Thread_Kill_zombies().
 */
typedef struct {
#if defined(RTEMS_SMP)
  /** @brief Lock protecting the zombie chain on SMP configurations. */
  ISR_lock_Control Lock;
#endif

  /** @brief Chain of zombie threads. */
  Chain_Control Chain;
} Thread_Zombie_registry;

/**
 * @brief The single instance of the zombie thread registry.
 */
extern Thread_Zombie_registry _Thread_Zombies;

/**
 * @brief Object identifier of the global constructor thread.
 */
extern Objects_Id _Thread_Global_constructor;

/**
 * @brief The thread which currently owns the floating-point context, if any.
 *
 * Only present when the CPU port provides hardware or software
 * floating-point support.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
extern Thread_Control *_Thread_Allocated_fp;
#endif

#if defined(RTEMS_SMP)
/* Maps a scheduler help node back to its containing thread control. */
#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
#endif

/**
 * @brief Visitor invoked by _Thread_Iterate() for each thread.
 *
 * NOTE(review): a true return value presumably stops the iteration --
 * confirm against the _Thread_Iterate() implementation.
 */
typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
0120
0121
0122
0123
0124
0125
0126
0127
0128
/**
 * @brief Calls the visitor with each thread and the argument.
 *
 * @param visitor The function called for each thread.
 * @param arg The argument passed through to each visitor call.
 */
void _Thread_Iterate(
  Thread_Visitor visitor,
  void *arg
);

/**
 * @brief Initializes the thread information (object class data).
 *
 * @param[out] information The thread information to initialize.
 */
void _Thread_Initialize_information( Thread_Information *information );

/**
 * @brief Performs the initialization necessary for the Thread Handler.
 */
void _Thread_Handler_initialization(void);

/**
 * @brief Creates the idle thread(s) during system initialization.
 */
void _Thread_Create_idle(void);

/**
 * @brief Starts multitasking; does not return to the caller.
 */
RTEMS_NO_RETURN void _Thread_Start_multitasking( void );
0165
0166
0167
0168
/**
 * @brief The configuration of a new thread to initialize, consumed by
 *   _Thread_Initialize().
 */
typedef struct {
  /** @brief The scheduler control instance for the new thread. */
  const struct _Scheduler_Control *scheduler;

  /** @brief The starting address of the thread stack area. */
  void *stack_area;

  /** @brief The size of the thread stack area in bytes. */
  size_t stack_size;

  /**
   * @brief Handler used to free the stack area.
   *
   * NOTE(review): semantics of a NULL handler not visible here -- confirm
   * in _Thread_Free().
   */
  void ( *stack_free )( void * );

  /** @brief The initial priority of the new thread. */
  Priority_Control priority;

  /** @brief The CPU budget operations for the new thread. */
  const Thread_CPU_budget_operations *cpu_budget_operations;

  /** @brief The thread name. */
  uint32_t name;

  /** @brief The initial ISR level of the new thread. */
  uint32_t isr_level;

  /** @brief Indicates whether the thread uses the floating-point unit. */
  bool is_fp;

  /** @brief Indicates whether the thread is preemptible. */
  bool is_preemptible;
} Thread_Configuration;
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
/**
 * @brief Initializes the thread according to the configuration.
 *
 * @param information The thread information of the corresponding API.
 * @param the_thread The thread to initialize.
 * @param config The configuration of the thread.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Initialize(
  Thread_Information *information,
  Thread_Control *the_thread,
  const Thread_Configuration *config
);

/**
 * @brief Frees the thread object and returns it to the information table.
 */
void _Thread_Free(
  Thread_Information *information,
  Thread_Control *the_thread
);

/**
 * @brief Starts the thread with the given entry information.
 *
 * @param lock_context An ISR lock context; NOTE(review): presumably the
 *   context of a prior _Thread_State_acquire() -- confirm in the
 *   implementation.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Start(
  Thread_Control *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context *lock_context
);

/**
 * @brief Restarts the thread with the new entry information.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Restart(
  Thread_Control *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context *lock_context
);

/**
 * @brief Yields the processor of the executing thread according to the
 *   scheduler's yield operation.
 */
void _Thread_Yield( Thread_Control *executing );

/**
 * @brief Changes the thread life states of the executing thread.
 *
 * @param life_states_to_clear The life states to clear.
 * @param life_states_to_set The life states to set.
 * @param ignored_life_states The life states to ignore for the change.
 *
 * @return The previous thread life state.
 */
Thread_Life_state _Thread_Change_life(
  Thread_Life_state life_states_to_clear,
  Thread_Life_state life_states_to_set,
  Thread_Life_state ignored_life_states
);
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
/**
 * @brief Sets the thread life protection state of the executing thread.
 *
 * @return The previous thread life state.
 */
Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );

/**
 * @brief Reclaims all zombie threads registered in _Thread_Zombies.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Exits the executing thread with the given exit value; does not
 *   return to the caller.
 *
 * @param exit_value The exit value made available to joining threads.
 * @param life_states_to_set The life states to set during the exit.
 */
RTEMS_NO_RETURN void _Thread_Exit(
  void *exit_value,
  Thread_Life_state life_states_to_set
);

/**
 * @brief Joins the thread, blocking the executing thread with the given
 *   wait state.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Join(
  Thread_Control *the_thread,
  States_Control waiting_for_join,
  Thread_Control *executing,
  Thread_queue_Context *queue_context
);

/**
 * @brief Indicates the outcome of _Thread_Cancel().
 */
typedef enum {
  /** @brief The cancellation completed. */
  THREAD_CANCEL_DONE,

  /** @brief The cancellation is still in progress. */
  THREAD_CANCEL_IN_PROGRESS
} Thread_Cancel_state;

/**
 * @brief Cancels the thread.
 *
 * @param life_states_to_clear The life states to clear as part of the
 *   cancellation.
 *
 * @return The cancellation state, see Thread_Cancel_state.
 */
Thread_Cancel_state _Thread_Cancel(
  Thread_Control *the_thread,
  Thread_Control *executing,
  Thread_queue_Context *queue_context
);

/**
 * @brief Closes the thread.
 *
 * NOTE(review): presumably cancels the thread and waits for its
 * termination if necessary -- confirm in the implementation.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Close(
  Thread_Control *the_thread,
  Thread_Control *executing,
  Thread_queue_Context *queue_context
);
0462
0463
0464
0465
0466
0467
0468
0469
0470
/**
 * @brief Checks whether the current thread state counts as ready
 *   according to _States_Is_ready().
 */
static inline bool _Thread_Is_ready( const Thread_Control *the_thread )
{
  return _States_Is_ready( the_thread->current_state );
}
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
/**
 * @brief Clears the specified thread states.
 *
 * NOTE(review): the _locked suffix presumably requires the caller to own
 * the thread state lock -- confirm in the implementation.
 *
 * @return The previous thread state.
 */
States_Control _Thread_Clear_state_locked(
  Thread_Control *the_thread,
  States_Control state
);

/**
 * @brief Clears the specified thread states.
 *
 * @return The previous thread state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control state
);

/**
 * @brief Sets the specified thread states; _locked variant, see
 *   _Thread_Clear_state_locked().
 *
 * @return The previous thread state.
 */
States_Control _Thread_Set_state_locked(
  Thread_Control *the_thread,
  States_Control state
);

/**
 * @brief Sets the specified thread states.
 *
 * @return The previous thread state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control state
);

/**
 * @brief Initializes the context of the thread for its first execution.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 * @brief Thread entry adaptor for idle threads.
 */
void _Thread_Entry_adaptor_idle( Thread_Control *executing );

/**
 * @brief Thread entry adaptor for entries taking a numeric argument.
 */
void _Thread_Entry_adaptor_numeric( Thread_Control *executing );

/**
 * @brief Thread entry adaptor for entries taking a pointer argument.
 */
void _Thread_Entry_adaptor_pointer( Thread_Control *executing );

/**
 * @brief Wrapper which executes first when a thread begins execution and
 *   invokes the thread entry.
 */
void _Thread_Handler( void );
0591
0592
0593
0594
0595
0596
0597
/**
 * @brief Acquires the thread state lock inside a critical section.
 *
 * The thread state lock is the lock of the thread's join queue.
 */
static inline void _Thread_State_acquire_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Disables interrupts and acquires the thread state lock.
 */
static inline void _Thread_State_acquire(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}

/**
 * @brief Disables interrupts and acquires the state lock of the executing
 *   thread.
 *
 * @return The executing thread.
 */
static inline Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Releases the thread state lock inside a critical section.
 */
static inline void _Thread_State_release_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Releases the thread state lock and enables interrupts.
 */
static inline void _Thread_State_release(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Checks if the executing context owns the thread state lock.
 *
 * Only available in debug configurations.
 */
#if defined(RTEMS_DEBUG)
static inline bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
/**
 * @brief Performs the priority actions specified by the thread queue
 *   context along the thread queue path starting at the thread.
 */
void _Thread_Priority_perform_actions(
  Thread_Control *start_of_path,
  Thread_queue_Context *queue_context
);

/**
 * @brief Adds the priority node to the priority aggregation of the thread.
 */
void _Thread_Priority_add(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  Thread_queue_Context *queue_context
);

/**
 * @brief Removes the priority node from the priority aggregation of the
 *   thread.
 */
void _Thread_Priority_remove(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  Thread_queue_Context *queue_context
);

/**
 * @brief Propagates a priority value change of the priority node through
 *   the priority aggregation of the thread.
 *
 * @param priority_group_order The priority group order within the same
 *   priority value.
 */
void _Thread_Priority_changed(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  Priority_Group_order priority_group_order,
  Thread_queue_Context *queue_context
);
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
/**
 * @brief Sets the priority value of the priority node and propagates the
 *   change via _Thread_Priority_changed().
 *
 * If the node is inactive (only possible when
 * RTEMS_SCORE_THREAD_REAL_PRIORITY_MAY_BE_INACTIVE is defined), only the
 * stored priority value is updated and no propagation takes place.
 */
static inline void _Thread_Priority_change(
  Thread_Control *the_thread,
  Priority_Node *priority_node,
  Priority_Control new_priority,
  Priority_Group_order priority_group_order,
  Thread_queue_Context *queue_context
)
{
  _Priority_Node_set_priority( priority_node, new_priority );

#if defined(RTEMS_SCORE_THREAD_REAL_PRIORITY_MAY_BE_INACTIVE)
  if ( !_Priority_Node_is_active( priority_node ) ) {
    /* The new value takes effect once the node is activated again */
    return;
  }
#endif

  _Thread_Priority_changed(
    the_thread,
    priority_node,
    priority_group_order,
    queue_context
  );
}
0818
#if defined(RTEMS_SMP)
/**
 * @brief Replaces the victim priority node with the replacement priority
 *   node in the priority aggregation of the thread.
 */
void _Thread_Priority_replace(
  Thread_Control *the_thread,
  Priority_Node *victim_node,
  Priority_Node *replacement_node
);
#endif

/**
 * @brief Updates the priority of all threads registered for a priority
 *   update in the thread queue context.
 */
void _Thread_Priority_update( Thread_queue_Context *queue_context );

#if defined(RTEMS_SMP)
/**
 * @brief Updates the priority of the thread and makes it sticky.
 */
void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread );

/**
 * @brief Updates the priority of the thread and cleans its sticky
 *   property.
 */
void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread );

/**
 * @brief Updates the priority of the thread ignoring its sticky property.
 */
void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread );
#endif
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
0887 static inline bool _Thread_Priority_less_than(
0888 Priority_Control left,
0889 Priority_Control right
0890 )
0891 {
0892 return left > right;
0893 }
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904 static inline Priority_Control _Thread_Priority_highest(
0905 Priority_Control left,
0906 Priority_Control right
0907 )
0908 {
0909 return _Thread_Priority_less_than( left, right ) ? right : left;
0910 }
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922
0923
0924 static inline Objects_Information *_Thread_Get_objects_information_by_id(
0925 Objects_Id id
0926 )
0927 {
0928 uint32_t the_api;
0929
0930 the_api = _Objects_Get_API( id );
0931
0932 if ( !_Objects_Is_api_valid( the_api ) ) {
0933 return NULL;
0934 }
0935
0936
0937
0938
0939
0940
0941
0942 return _Objects_Information_table[ the_api ][ 1 ];
0943 }
0944
0945
0946
0947
0948
0949
0950
0951
/**
 * @brief Gets the thread object information of the thread, determined by
 *   the API of its object identifier.
 *
 * The API of the identifier must be valid.
 */
static inline Thread_Information *_Thread_Get_objects_information(
  Thread_Control *the_thread
)
{
  size_t the_api;
  Thread_Information *information;

  the_api = (size_t) _Objects_Get_API( the_thread->Object.id );
  _Assert( _Objects_Is_api_valid( the_api ) );

  /* NOTE(review): index 1 presumably selects the thread class of the
     API -- confirm against the object class definitions. */
  information = (Thread_Information *)
    _Objects_Information_table[ the_api ][ 1 ];
  _Assert( information != NULL );

  return information;
}
0968
0969
0970
0971
0972
0973
0974
0975
0976
/**
 * @brief Gets the thread associated with the object identifier.
 *
 * @param id The object identifier of the thread.
 * @param lock_context The ISR lock context for the thread lookup.
 *
 * @return The thread, or NULL if no thread exists for this identifier.
 */
Thread_Control *_Thread_Get(
  Objects_Id id,
  ISR_lock_Context *lock_context
);

/**
 * @brief Gets the object identifier of the calling thread.
 */
Objects_Id _Thread_Self_id( void );

/**
 * @brief Gets the processor assigned to the thread.
 *
 * On uniprocessor configurations this is always the current processor.
 */
static inline Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/**
 * @brief Sets the processor assigned to the thread; no-op on
 *   uniprocessor configurations.
 */
static inline void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
/**
 * @brief Checks if the thread is the executing thread of the current
 *   processor.
 */
static inline bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Checks if the thread executes currently on some processor in the
 *   system, using the is-executing indicator of its context.
 */
static inline bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Checks if the thread is the heir of the current processor.
 */
static inline bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * @brief Clears all blocking states of the thread.
 */
static inline void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
/**
 * @brief Checks if the thread is the current owner of the floating-point
 *   context.
 */
static inline bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/**
 * @brief Saves the floating-point context of the executing thread.
 *
 * With deferred floating-point switching the save is performed lazily in
 * _Thread_Restore_fp() instead, so this is a no-op in that configuration.
 */
static inline void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

/**
 * @brief Restores the floating-point context of the executing thread.
 *
 * With deferred floating-point switching the context of the previous
 * owner is saved first and the executing thread becomes the new owner.
 */
static inline void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
/**
 * @brief Releases ownership of the floating-point context.
 */
static inline void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
/**
 * @brief Checks if a thread dispatch is necessary on the current
 *   processor.
 */
static inline bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
1202
1203
1204
1205
1206
1207
/**
 * @brief Gets the maximum number of internal threads.
 *
 * There is one idle thread per configured processor, plus the MPCI
 * receive server when multiprocessing is active.
 */
static inline uint32_t _Thread_Get_maximum_internal_threads(void)
{
  uint32_t count = rtems_configuration_get_maximum_processors();

#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    count += 1;
  }
#endif

  return count;
}
1223
1224
1225
1226
1227
1228
1229
/**
 * @brief Allocates an internal thread object without protection.
 *
 * @return The allocated thread, or NULL if none is available.
 */
static inline Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Information.Objects );
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249 static inline Thread_Control *_Thread_Get_heir_and_make_it_executing(
1250 Per_CPU_Control *cpu_self
1251 )
1252 {
1253 Thread_Control *heir;
1254
1255 heir = cpu_self->heir;
1256 cpu_self->dispatch_necessary = false;
1257 cpu_self->executing = heir;
1258
1259 return heir;
1260 }
1261
1262
1263
1264
1265
1266
1267
1268
/**
 * @brief Updates the used CPU time of the thread.
 *
 * The time since the last CPU usage timestamp of the processor is charged
 * to the thread and the processor timestamp is refreshed to the current
 * uptime.
 */
static inline void _Thread_Update_CPU_time_used(
  Thread_Control *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}

#if defined( RTEMS_SMP )
/**
 * @brief Installs the new heir on the processor and requests a thread
 *   dispatch.
 *
 * The CPU time consumed by the previous heir is charged to it before it
 * is replaced.
 */
static inline void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
/**
 * @brief Gets the used CPU time of the thread.
 *
 * @return The used CPU time of the thread.
 */
Timestamp_Control _Thread_Get_CPU_time_used( Thread_Control *the_thread );

/**
 * @brief Gets the used CPU time of the thread.
 *
 * NOTE(review): the _locked suffix presumably requires the caller to
 * provide the appropriate locking -- confirm in the implementation.
 */
Timestamp_Control _Thread_Get_CPU_time_used_locked(
  Thread_Control *the_thread
);

/**
 * @brief Gets the used CPU time of the thread accumulated after the last
 *   CPU usage reset.
 */
Timestamp_Control _Thread_Get_CPU_time_used_after_last_reset(
  Thread_Control *the_thread
);
1342
1343
1344
1345
1346
1347
/**
 * @brief Initializes the thread action control with an empty action
 *   chain.
 */
static inline void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

/**
 * @brief Initializes the thread action as off-chain, i.e. not pending.
 */
static inline void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
/**
 * @brief Adds the post-switch action to the thread if it is not already
 *   on the action chain, sets its handler, and requests a thread dispatch
 *   for the processor of the thread.
 *
 * The caller must own the thread state lock.
 */
static inline void _Thread_Add_post_switch_action(
  Thread_Control *the_thread,
  Thread_Action *action,
  Thread_Action_handler handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}

/**
 * @brief Appends the post-switch action to the thread; the action handler
 *   must already be set.
 *
 * The caller must own the thread state lock.
 */
static inline void _Thread_Append_post_switch_action(
  Thread_Control *the_thread,
  Thread_Action *action
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  _Assert( action->handler != NULL );

  _Chain_Append_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
1425
1426
1427
1428
1429
1430
1431
1432
1433
/**
 * @brief Checks if the restarting life state is set.
 */
static inline bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/**
 * @brief Checks if the terminating life state is set.
 */
static inline bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/**
 * @brief Checks if a life change is allowed, i.e. the life state is
 *   neither protected nor change-deferred.
 */
static inline bool _Thread_Is_life_change_allowed(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
}

/**
 * @brief Checks if a restart or termination is in progress.
 */
static inline bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
}

/**
 * @brief Checks if the thread is joinable, i.e. not detached.
 *
 * The caller must own the thread state lock.
 */
static inline bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
1503
1504
1505
1506
1507
1508
/**
 * @brief Increments the thread resource count; no-op unless resource
 *   counting is enabled.
 */
static inline void _Thread_Resource_count_increment(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  ++the_thread->resource_count;
#else
  (void) the_thread;
#endif
}

/**
 * @brief Decrements the thread resource count; no-op unless resource
 *   counting is enabled.
 */
static inline void _Thread_Resource_count_decrement(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  --the_thread->resource_count;
#else
  (void) the_thread;
#endif
}

#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
/**
 * @brief Checks if the thread owns resources, i.e. its resource count is
 *   non-zero.
 */
static inline bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  return the_thread->resource_count != 0;
}
#endif
1555
1556
1557
1558
1559
1560
1561
1562
/**
 * @brief Gets the home scheduler of the thread.
 *
 * On uniprocessor configurations this is the one and only scheduler.
 */
static inline const Scheduler_Control *_Thread_Scheduler_get_home(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.home_scheduler;
#else
  (void) the_thread;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Gets the scheduler node of the home scheduler of the thread.
 *
 * On SMP configurations this is the first of the thread wait nodes, which
 * must not be empty.
 */
static inline Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * @brief Gets the scheduler node of the thread by scheduler index.
 *
 * On SMP configurations the scheduler nodes of a thread are stored in one
 * contiguous array indexed by scheduler.
 */
static inline Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t scheduler_index
)
{
  _Assert( scheduler_index < _Scheduler_Count );
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1619
#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread scheduler lock inside a critical section.
 */
static inline void _Thread_Scheduler_acquire_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}

/**
 * @brief Releases the thread scheduler lock inside a critical section.
 */
static inline void _Thread_Scheduler_release_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}

/**
 * @brief Processes the pending scheduler node requests of the thread.
 */
void _Thread_Scheduler_process_requests( Thread_Control *the_thread );

/**
 * @brief Registers an add or remove request for the scheduler node.
 *
 * If the node has no pending request, it is queued in the request list of
 * the thread.  A pending request of the opposite kind cancels out to
 * SCHEDULER_NODE_REQUEST_NOTHING.
 */
static inline void _Thread_Scheduler_add_request(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node,
  Scheduler_Node_request request
)
{
  ISR_lock_Context lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* First request for this node: push it onto the request list */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite pending request cancels out */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
        || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
          && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}

/**
 * @brief Appends the scheduler node to the thread wait nodes and
 *   registers a corresponding add request.
 */
static inline void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}

/**
 * @brief Removes the scheduler node from the thread wait nodes and
 *   registers a corresponding remove request.
 */
static inline void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
#endif
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
/**
 * @brief Gets the current priority of the thread with respect to its home
 *   scheduler.
 */
static inline Priority_Control _Thread_Get_priority(
  const Thread_Control *the_thread
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
}

/**
 * @brief Gets the unmapped priority of the thread (inverse of the
 *   scheduler priority mapping).
 */
static inline Priority_Control _Thread_Get_unmapped_priority(
  const Thread_Control *the_thread
)
{
  return SCHEDULER_PRIORITY_UNMAP( _Thread_Get_priority( the_thread ) );
}

/**
 * @brief Gets the unmapped real priority of the thread.
 */
static inline Priority_Control _Thread_Get_unmapped_real_priority(
  const Thread_Control *the_thread
)
{
  return SCHEDULER_PRIORITY_UNMAP( the_thread->Real_priority.priority );
}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
/**
 * @brief Acquires the default thread wait lock inside a critical section.
 */
static inline void _Thread_Wait_acquire_default_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}

/**
 * @brief Disables interrupts and acquires the default wait lock of the
 *   executing thread.
 *
 * @return The executing thread.
 */
static inline Thread_Control *_Thread_Wait_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Wait_acquire_default_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Disables interrupts and acquires the default thread wait lock.
 */
static inline void _Thread_Wait_acquire_default(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}

/**
 * @brief Releases the default thread wait lock inside a critical section.
 */
static inline void _Thread_Wait_release_default_critical(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}

/**
 * @brief Releases the default thread wait lock and enables interrupts.
 */
static inline void _Thread_Wait_release_default(
  Thread_Control *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1886
#if defined(RTEMS_SMP)
/* Maps a request gate node back to its containing thread queue context */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )

/**
 * @brief Removes the request gate from the pending requests of the thread
 *   and opens the gate of the next pending request, if any.
 *
 * The caller must own the default thread wait lock.
 */
static inline void _Thread_Wait_remove_request_locked(
  Thread_Control *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
  Chain_Node *first;

  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );

  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
  }
}

/**
 * @brief Acquires the thread queue lock inside a critical section,
 *   charging the lock statistics to the executing thread.
 */
static inline void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}

/**
 * @brief Releases the thread queue lock inside a critical section.
 */
static inline void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
#endif
1947
1948
1949
1950
1951
1952
1953
1954
1955
/**
 * @brief Acquires the thread wait lock inside a critical section.
 *
 * On SMP configurations the default wait lock is taken first.  If the
 * thread currently waits on a thread queue, a request gate is registered,
 * the default lock is dropped, and the queue lock is acquired instead.
 * If the queue was detached concurrently (the context's Wait.queue became
 * NULL), the queue lock is released again and the default lock is
 * re-acquired with the request removed.  On uniprocessor configurations
 * this is a no-op.
 */
static inline void _Thread_Wait_acquire_critical(
  Thread_Control *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      /* The queue was detached while we waited for its lock */
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
2004
2005
2006
2007
2008
2009
2010
2011
/**
 * @brief Disables interrupts and acquires the thread wait lock.
 */
static inline void _Thread_Wait_acquire(
  Thread_Control *the_thread,
  Thread_queue_Context *queue_context
)
{
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
/**
 * @brief Releases the thread wait lock inside a critical section.
 *
 * Undoes _Thread_Wait_acquire_critical(): the queue lock is released if
 * one was acquired, the pending request gate is removed, and the default
 * wait lock is released.  On uniprocessor configurations this is a no-op.
 */
static inline void _Thread_Wait_release_critical(
  Thread_Control *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
2064
2065
2066
2067
2068
2069
2070
2071
2072
/**
 * @brief Releases the thread wait locks and restores the previous interrupt
 *   status.
 *
 * Counterpart of _Thread_Wait_acquire().
 *
 * @param the_thread The thread of which the wait locks are released.
 * @param queue_context The thread queue context used for the corresponding
 *   _Thread_Wait_acquire().
 */
static inline void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _Thread_Wait_release_critical( the_thread, queue_context );
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
/**
 * @brief Claims the thread wait queue.
 *
 * The caller must not be the owner of the default thread wait lock.  The
 * thread must not already have a claimed wait queue (asserted below).
 *
 * @param the_thread The thread of which the wait queue is claimed.
 * @param queue The new thread wait queue.
 */
static inline void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  /*
   * Start a new wait sequence: no pending lock requests yet, and close the
   * tranquilizer gate so that _Thread_Wait_tranquilize() blocks until
   * _Thread_Wait_restore_default() opens it again.
   */
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
2117
2118
2119
2120
2121
2122
2123
2124
/**
 * @brief Finalizes the thread wait queue claim via registration of the
 *   corresponding thread queue operations.
 *
 * @param the_thread The thread of which the wait queue is claimed.
 * @param operations The corresponding thread queue operations.
 */
static inline void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  the_thread->Wait.operations = operations;
}
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
/**
 * @brief Removes a thread wait lock request.
 *
 * On SMP configurations, removes the request while holding the thread wait
 * default lock.  A no-op on uniprocessor configurations.
 *
 * @param the_thread The thread of which the request is removed.
 * @param queue_lock_context The queue lock context of the lock request.
 */
static inline void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
/**
 * @brief Restores the default thread wait queue and operations.
 *
 * The caller must be the owner of the current thread wait queue lock.
 *
 * On SMP configurations, the pending lock requests are marked as "queue
 * gone" (their wait queue pointer is set to NULL).  If there are pending
 * requests, the tranquilizer gate is appended to the pending request chain
 * and will be opened once the last request is removed; otherwise the gate is
 * opened immediately.  See also _Thread_Wait_tranquilize().
 *
 * @param the_thread The thread of which the wait state is restored to the
 *   default.
 */
static inline void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /* Tell each pending request that the wait queue is no longer valid. */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    /*
     * The tranquilizer gate is opened when the last pending request is
     * removed from the chain.
     */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    /* No pending requests: the wait state is already tranquil. */
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
/**
 * @brief Tranquilizes the thread after a wait on a thread queue.
 *
 * Busy-waits on the tranquilizer gate until it is opened by
 * _Thread_Wait_restore_default() (directly, or indirectly via removal of the
 * last pending lock request).  A no-op on uniprocessor configurations.
 *
 * @param the_thread The thread waiting for the gate to open.
 */
static inline void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
2242
2243
2244
2245
2246
2247
2248
2249
/**
 * @brief Cancels a thread wait on a thread queue.
 *
 * If the thread currently waits on a queue, it is extracted via the
 * registered thread queue extract operation and the default wait state is
 * restored.
 *
 * @param the_thread The thread of which the wait is canceled.
 * @param queue_context The thread queue context used for the corresponding
 *   _Thread_Wait_acquire().
 */
static inline void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
#if defined(RTEMS_SMP)
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    /*
     * _Thread_Wait_restore_default() cleared the wait queue in the lock
     * context; put it back so that the release path can use it.
     */
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    queue_context->Lock_context.Wait.queue = queue;
#endif
  }
}
2277
2278
2279
2280
/**
 * @brief Mask to get the thread wait state flags.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread does not wait on something.
 *
 * In this wait state, the wait class is zero.
 */
#define THREAD_WAIT_STATE_READY 0x0U

/**
 * @brief Indicates that the thread begins with the blocking operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

/**
 * @brief Indicates that the thread waits for a period.
 */
#define THREAD_WAIT_CLASS_PERIOD 0x800U
2330
2331
2332
2333
2334
2335
2336
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, this is an atomic store with relaxed ordering.
 *
 * @param the_thread The thread of which the wait flags are set.
 * @param flags The new thread wait flags.
 */
static inline void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
2348
2349
2350
2351
2352
2353
2354
2355
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations, this is an atomic load with relaxed ordering.
 *
 * @param the_thread The thread of which the wait flags are returned.
 *
 * @return The thread wait flags.
 */
static inline Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
2366
2367
2368
2369
2370
2371
2372
2373
/**
 * @brief Returns the thread wait flags with acquire memory ordering.
 *
 * On SMP configurations, this is an atomic load with acquire ordering, so
 * that reads after this load are not reordered before it.
 *
 * @param the_thread The thread of which the wait flags are returned.
 *
 * @return The thread wait flags.
 */
static inline Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
/**
 * @brief Tries to change the thread wait flags with release memory ordering.
 *
 * Must be called inside a critical section (interrupts disabled, see the
 * assertion below); on uniprocessor configurations the interrupt disable is
 * what makes the compare-and-set atomic.
 *
 * @param the_thread The thread of which the wait flags are changed.
 * @param expected_flags The expected current thread wait flags.
 * @param desired_flags The new thread wait flags to set if the current
 *   flags equal the expected flags.
 *
 * @retval true The wait flags were equal to the expected flags and were
 *   changed to the desired flags.
 * @retval false Otherwise.
 */
static inline bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
/**
 * @brief Tries to change the thread wait flags with acquire memory ordering.
 *
 * On uniprocessor configurations, this disables interrupts around
 * _Thread_Wait_flags_try_change_release() to obtain atomicity.
 *
 * @param the_thread The thread of which the wait flags are changed.
 * @param expected_flags The expected current thread wait flags.
 * @param desired_flags The new thread wait flags to set if the current
 *   flags equal the expected flags.
 *
 * @retval true The wait flags were equal to the expected flags and were
 *   changed to the desired flags.
 * @retval false Otherwise.
 */
static inline bool _Thread_Wait_flags_try_change_acquire(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_ACQUIRE,
    ATOMIC_ORDER_ACQUIRE
  );
#else
  bool      success;
  ISR_Level level;

  _ISR_Local_disable( level );

  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    expected_flags,
    desired_flags
  );

  _ISR_Local_enable( level );
  return success;
#endif
}
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
/**
 * @brief Returns an object identifier related to the current thread wait
 *   state.
 *
 * NOTE(review): the definition is out of view here; presumably this yields
 * the identifier of the object the thread waits on — confirm against the
 * implementation.
 */
Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
2489
2490
2491
2492
2493
2494
2495 static inline Status_Control _Thread_Wait_get_status(
2496 const Thread_Control *the_thread
2497 )
2498 {
2499 return (Status_Control) the_thread->Wait.return_code;
2500 }
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
/**
 * @brief Continues the thread with the given wait status.
 *
 * NOTE(review): the definition is out of view; presumably this sets the wait
 * return code and unblocks the thread if necessary — confirm against the
 * implementation.
 */
void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
2515
2516
2517
2518
2519
2520
/**
 * @brief The watchdog service routine used for thread timeouts.
 *
 * Installed as the watchdog routine by _Thread_Add_timeout_ticks().
 */
void _Thread_Timeout( Watchdog_Control *the_watchdog );
2522
2523
2524
2525
2526
2527
2528
/**
 * @brief Initializes the thread timer information.
 *
 * The timer lock is initialized, the watchdog header is set to the ticks
 * based header of the given processor, and the watchdog is pre-initialized
 * for that processor.
 *
 * @param[out] timer The thread timer information to initialize.
 * @param cpu The processor providing the initial watchdog header.
 */
static inline void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
2538
2539
2540
2541
2542
2543
2544
2545
/**
 * @brief Adds a ticks based timeout for the thread.
 *
 * Under protection of the thread timer lock, the ticks based watchdog
 * header of the given processor is selected, _Thread_Timeout() is installed
 * as the watchdog routine, and the watchdog is inserted with the ticks
 * interval.
 *
 * @param the_thread The thread to add the timeout for.
 * @param cpu The processor providing the watchdog header.
 * @param ticks The timeout interval in clock ticks.
 */
static inline void _Thread_Add_timeout_ticks(
  Thread_Control    *the_thread,
  Per_CPU_Control   *cpu,
  Watchdog_Interval  ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
2563
2564
2565
2566
2567
2568
2569
2570
2571
/**
 * @brief Inserts the realtime clock based watchdog of the thread.
 *
 * Under protection of the thread timer lock, the realtime watchdog header
 * of the given processor is selected, the routine is installed, and the
 * watchdog is inserted with the absolute expiration time.
 *
 * @param the_thread The thread to insert the watchdog of.
 * @param cpu The processor providing the watchdog header.
 * @param routine The watchdog service routine to call on expiration.
 * @param expire The absolute expiration time.
 */
static inline void _Thread_Timer_insert_realtime(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context  lock_context;
  Watchdog_Header  *header;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
  the_thread->Timer.header = header;
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
2591
2592
2593
2594
2595
2596
/**
 * @brief Removes the watchdog timer from the thread.
 *
 * Under protection of the thread timer lock, the watchdog is removed from
 * the currently registered header.  On SMP configurations, the processor
 * recorded in the watchdog is used; otherwise the current processor.
 *
 * @param the_thread The thread to remove the watchdog from.
 */
static inline void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
2615
2616
2617
2618
2619
2620
2621
2622
/**
 * @brief Tranquilizes the thread wait state, removes the watchdog timer,
 *   and unblocks the thread.
 *
 * On multiprocessing configurations, a proxy (a thread with a non-local
 * object identifier) is unblocked via the thread queue proxy unblock;
 * otherwise the thread is unblocked directly.
 *
 * @param the_thread The thread to unblock.
 * @param queue The thread queue, used only for the proxy unblock on
 *   multiprocessing configurations.
 */
static inline void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
/**
 * @brief Sets the name of the thread.
 *
 * @param the_thread The thread to set the name of.
 * @param name The new thread name (NOTE(review): definition out of view —
 *   presumably copied into thread-owned storage; confirm ownership and
 *   length limits against the implementation).
 *
 * @return A status control value indicating success or failure.
 */
Status_Control _Thread_Set_name(
  Thread_Control *the_thread,
  const char     *name
);
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
/**
 * @brief Gets the name of the thread.
 *
 * @param the_thread The thread to get the name of.
 * @param[out] buffer The buffer to copy the name into.
 * @param buffer_size The size of the buffer in bytes.
 *
 * @return NOTE(review): definition out of view — presumably the length of
 *   the thread name; confirm against the implementation.
 */
size_t _Thread_Get_name(
  const Thread_Control *the_thread,
  char                 *buffer,
  size_t                buffer_size
);
2671
#if defined(RTEMS_SMP)
/* Amount added to or subtracted from the pin level per pin/unpin. */
#define THREAD_PIN_STEP 2

/*
 * Flag combined with THREAD_PIN_STEP in the pin level; see the comparison
 * in _Thread_Unpin().
 */
#define THREAD_PIN_PREEMPTION 1

/**
 * @brief Performs the expensive part of the thread unpin operation.
 *
 * Called by _Thread_Unpin() when the pin level equals
 * ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION ); definition out of view.
 *
 * @param executing The executing thread.
 * @param cpu_self The processor of the executing thread.
 */
void _Thread_Do_unpin(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self
);
#endif
2688
2689
2690
2691
2692
2693
/**
 * @brief Pins the executing thread.
 *
 * On SMP configurations, the pin level of the executing thread is increased
 * by THREAD_PIN_STEP.  Pins nest; see _Thread_Unpin() for the counterpart.
 * A no-op on uniprocessor configurations.
 *
 * @param executing The executing thread (asserted).
 */
static inline void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  _Assert( executing == _Thread_Get_executing() );

  executing->Scheduler.pin_level += THREAD_PIN_STEP;
#else
  (void) executing;
#endif
}
2704
2705
2706
2707
2708
2709
2710
/**
 * @brief Unpins the executing thread.
 *
 * On SMP configurations, in the common case the pin level is simply
 * decreased by THREAD_PIN_STEP.  Only when the pin level equals exactly
 * ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION ) is the expensive
 * _Thread_Do_unpin() path taken.  A no-op on uniprocessor configurations.
 *
 * @param executing The executing thread (asserted against the processor).
 * @param cpu_self The processor of the executing thread.
 */
static inline void _Thread_Unpin(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self
)
{
#if defined(RTEMS_SMP)
  unsigned int pin_level;

  _Assert( executing == _Per_CPU_Get_executing( cpu_self ) );

  pin_level = executing->Scheduler.pin_level;
  _Assert( pin_level > 0 );

  if (
    RTEMS_PREDICT_TRUE(
      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
    )
  ) {
    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
  } else {
    _Thread_Do_unpin( executing, cpu_self );
  }
#else
  (void) executing;
  (void) cpu_self;
#endif
}
2738
2739
2740
2741 #ifdef __cplusplus
2742 }
2743 #endif
2744
2745 #if defined(RTEMS_MULTIPROCESSING)
2746 #include <rtems/score/threadmp.h>
2747 #endif
2748
2749 #ifdef __cplusplus
2750 extern "C" {
2751 #endif
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
/**
 * @brief Removes the watchdog timer from the thread and continues the
 *   thread with the given status.
 *
 * On multiprocessing configurations, a potential proxy for the thread is
 * extracted before the thread is continued.
 *
 * @param the_thread The thread to remove the timer from and continue.
 * @param status The thread wait status passed to _Thread_Continue().
 */
static inline void _Thread_Timer_remove_and_continue(
  Thread_Control *the_thread,
  Status_Control  status
)
{
  _Thread_Timer_remove( the_thread );
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_MP_Extract_proxy( the_thread );
#endif
  _Thread_Continue( the_thread, status );
}
2774
2775 #ifdef __cplusplus
2776 }
2777 #endif
2778
2779 #endif
2780