Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:27

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreThreadQueue
0007  *
0008  * @brief This source file contains the implementation of
0009  *   _Thread_queue_Deadlock_fatal(), _Thread_queue_Deadlock_status(),
0010  *   _Thread_queue_Do_dequeue(), _Thread_queue_Enqueue(),
0011  *   _Thread_queue_Enqueue_do_nothing_extra(), _Thread_queue_Enqueue_sticky(),
0012  *   _Thread_queue_Extract_locked(), _Thread_queue_Path_acquire(),
0013  *   _Thread_queue_Path_release(),
0014  *   _Thread_queue_Resume(),_Thread_queue_Surrender(),
0015  *   _Thread_queue_Surrender_no_priority(), _Thread_queue_Surrender_sticky().
0016  */
0017 
0018 /*
0019  *  COPYRIGHT (c) 1989-2014.
0020  *  On-Line Applications Research Corporation (OAR).
0021  *
0022  *  Copyright (C) 2015, 2016 embedded brains GmbH & Co. KG
0023  *
0024  * Redistribution and use in source and binary forms, with or without
0025  * modification, are permitted provided that the following conditions
0026  * are met:
0027  * 1. Redistributions of source code must retain the above copyright
0028  *    notice, this list of conditions and the following disclaimer.
0029  * 2. Redistributions in binary form must reproduce the above copyright
0030  *    notice, this list of conditions and the following disclaimer in the
0031  *    documentation and/or other materials provided with the distribution.
0032  *
0033  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0034  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0035  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0036  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0037  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0038  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0039  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0040  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0041  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0042  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0043  * POSSIBILITY OF SUCH DAMAGE.
0044  */
0045 
0046 #ifdef HAVE_CONFIG_H
0047 #include "config.h"
0048 #endif
0049 
0050 #include <rtems/score/threadqimpl.h>
0051 #include <rtems/score/assert.h>
0052 #include <rtems/score/threaddispatch.h>
0053 #include <rtems/score/threadimpl.h>
0054 #include <rtems/score/status.h>
0055 #include <rtems/score/watchdogimpl.h>
0056 
/*
 * Thread wait flag combinations used by this file to track the progress of
 * an enqueue operation on a thread queue object: the thread first announces
 * its intention to block and is later transitioned to the blocked state, see
 * _Thread_queue_Enqueue() and _Thread_queue_Make_ready_again().
 */
#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)
0062 
0063 #if defined(RTEMS_SMP)
0064 /*
0065  * A global registry of active thread queue links is used to provide deadlock
0066  * detection on SMP configurations.  This is simple to implement and no
0067  * additional storage is required for the thread queues.  The disadvantage is
0068  * the global registry is not scalable and may lead to lock contention.
0069  * However, the registry is only used in case of nested resource conflicts.  In
0070  * this case, the application is already in trouble.
0071  */
0072 
/*
 * The global link registry: an ISR lock protecting a red-black tree of
 * Thread_queue_Link nodes, ordered by the address of the link source queue
 * (see _Thread_queue_Link_less()).
 */
typedef struct {
  /* Protects the Links tree below. */
  ISR_lock_Control Lock;

  /* Tree of active Thread_queue_Link registry nodes. */
  RBTree_Control Links;
} Thread_queue_Links;

/* The single registry instance used by all thread queues. */
static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};
0083 
0084 static bool _Thread_queue_Link_equal(
0085   const void        *left,
0086   const RBTree_Node *right
0087 )
0088 {
0089   const Thread_queue_Queue *the_left;
0090   const Thread_queue_Link  *the_right;
0091 
0092   the_left = left;
0093   the_right = (Thread_queue_Link *) right;
0094 
0095   return the_left == the_right->source;
0096 }
0097 
0098 static bool _Thread_queue_Link_less(
0099   const void        *left,
0100   const RBTree_Node *right
0101 )
0102 {
0103   const Thread_queue_Queue *the_left;
0104   const Thread_queue_Link  *the_right;
0105 
0106   the_left = left;
0107   the_right = (Thread_queue_Link *) right;
0108 
0109   return (uintptr_t) the_left < (uintptr_t) the_right->source;
0110 }
0111 
0112 static void *_Thread_queue_Link_map( RBTree_Node *node )
0113 {
0114   return node;
0115 }
0116 
0117 static Thread_queue_Link *_Thread_queue_Link_find(
0118   Thread_queue_Links *links,
0119   Thread_queue_Queue *source
0120 )
0121 {
0122   return _RBTree_Find_inline(
0123     &links->Links,
0124     source,
0125     _Thread_queue_Link_equal,
0126     _Thread_queue_Link_less,
0127     _Thread_queue_Link_map
0128   );
0129 }
0130 
/*
 * Registers a source-to-target wait dependency in the global link registry.
 *
 * Before the new link is inserted, the chain of already registered links is
 * followed starting at the target queue.  If this walk reaches the source
 * queue again, inserting the link would close a cycle, so
 * THREAD_QUEUE_DEADLOCK_DETECTED is returned and the registry is left
 * unchanged.  Otherwise the link is inserted and THREAD_QUEUE_NO_DEADLOCK
 * is returned.  The whole operation runs under the registry ISR lock.
 *
 * @param[out] link is the link to initialize and insert.
 * @param source is the queue the enqueuing thread wants to wait on.
 * @param target is the queue the owner of the source queue waits on.
 */
static Thread_queue_Deadlock_status _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  /* Follow registered links from the target until the chain ends. */
  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    /* Reaching the source again means the new link would close a cycle. */
    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return THREAD_QUEUE_DEADLOCK_DETECTED;
    }
  }

  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return THREAD_QUEUE_NO_DEADLOCK;
}
0176 
0177 static void _Thread_queue_Link_remove( Thread_queue_Link *link )
0178 {
0179   Thread_queue_Links *links;
0180   ISR_lock_Context    lock_context;
0181 
0182   links = &_Thread_queue_Links;
0183 
0184   _ISR_lock_Acquire( &links->Lock, &lock_context );
0185   _RBTree_Extract( &links->Links, &link->Registry_node );
0186   _ISR_lock_Release( &links->Lock, &lock_context );
0187 }
0188 #endif
0189 
/*
 * Releases the thread wait locks and registry links acquired by
 * _Thread_queue_Path_acquire().
 *
 * On SMP configurations, the path links are walked in reverse acquisition
 * order.  A link with a non-NULL wait queue holds a thread queue wait lock
 * plus a pending request; a link with a NULL wait queue holds only the
 * default thread wait lock (see the deadlock link appended by
 * _Thread_queue_Path_append_deadlock_thread()).  On uniprocessor
 * configurations this is a no-op.
 */
#if !defined(RTEMS_SMP)
static
#endif
void _Thread_queue_Path_release( Thread_queue_Context *queue_context )
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  /* Walk the path backwards, releasing the locks taken for each hop. */
  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      /* This hop holds a thread queue wait lock and a registry entry. */
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      /* This hop holds only the owner's default thread wait lock. */
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) queue_context;
#endif
}
0230 
0231 #if defined(RTEMS_SMP)
/*
 * Appends a link for the first enqueuing thread on a deadlocked path and
 * acquires its default thread wait lock, so that
 * _Thread_queue_Path_release() later releases it.
 *
 * @param the_thread is the owner at which the deadlock was detected.
 * @param queue_context provides the path links and the dedicated deadlock
 *   link storage.
 */
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for the
   * first thread on the path that tries to enqueue on a thread queue.  This
   * thread can be identified by the thread wait operations.  This lock acquire
   * is necessary for the timeout and explicit thread priority changes, see
   * _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  /* Follow owners until a thread with default wait operations is found. */
  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    /* Use the dedicated deadlock link; mark it as holding no queue lock. */
    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
0271 #endif
0272 
/*
 * Acquires the thread wait locks along the path of thread queue owners
 * starting at the given queue and checks this path for deadlocks.
 *
 * @param queue is the thread queue the thread intends to enqueue on.
 * @param the_thread is the enqueuing thread.
 * @param queue_context provides the path link storage used on SMP.
 *
 * @retval THREAD_QUEUE_NO_DEADLOCK The path ends at a queue without an
 *   owner or at an owner which is not waiting; on SMP, the acquired locks
 *   must later be released via _Thread_queue_Path_release().
 * @retval THREAD_QUEUE_DEADLOCK_DETECTED The owner chain leads back to the
 *   enqueuing thread or closes a cycle in the link registry.
 */
#if !defined(RTEMS_SMP)
static
#endif
Thread_queue_Deadlock_status _Thread_queue_Path_acquire(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control     *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  In SMP
   * configurations, we basically do the same.  The fact that we may have more
   * than one executing thread and each thread queue has its own SMP lock makes
   * the procedure a bit more difficult.  We have to avoid deadlocks at SMP
   * lock level, since this would result in an unrecoverable deadlock of the
   * overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return THREAD_QUEUE_NO_DEADLOCK;
  }

  /* The thread would wait on a queue it already owns. */
  if ( owner == the_thread ) {
    return THREAD_QUEUE_DEADLOCK_DETECTED;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    /* Remember this hop so _Thread_queue_Path_release() can undo it. */
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      Thread_queue_Deadlock_status deadlock_status;

      /* Register the wait dependency; this fails if it would close a cycle. */
      deadlock_status = _Thread_queue_Link_add( link, queue, target );

      if ( deadlock_status == THREAD_QUEUE_NO_DEADLOCK ) {
        /*
         * Announce the pending request through a gate, drop the default
         * wait lock, and acquire the wait lock of the target queue.
         */
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          /*
           * The owner stopped waiting on the target queue in the meantime,
           * so the path ends here; undo this hop and report no deadlock.
           */
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return THREAD_QUEUE_NO_DEADLOCK;
        }
      } else {
        /* Deadlock: keep only the default lock of this hop on the path. */
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return THREAD_QUEUE_DEADLOCK_DETECTED;
      }
    } else {
      return THREAD_QUEUE_NO_DEADLOCK;
    }

    /* Advance to the next hop: the queue the current owner waits on. */
    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  /*
   * On uniprocessor configurations, simply follow the owner chain without
   * locking: a deadlock exists exactly if the chain leads back to the
   * enqueuing thread.
   */
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return THREAD_QUEUE_NO_DEADLOCK;
    }

    if ( owner == the_thread ) {
      return THREAD_QUEUE_DEADLOCK_DETECTED;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return THREAD_QUEUE_NO_DEADLOCK;
}
0385 
0386 void _Thread_queue_Enqueue_do_nothing_extra(
0387   Thread_queue_Queue   *queue,
0388   Thread_Control       *the_thread,
0389   Per_CPU_Control      *cpu_self,
0390   Thread_queue_Context *queue_context
0391 )
0392 {
0393   /* Do nothing */
0394 }
0395 
/*
 * Deadlock callout which records STATUS_DEADLOCK as the thread wait return
 * code, so the enqueue operation fails with a deadlock status.
 */
void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}
0400 
/*
 * Deadlock callout which treats a detected deadlock as a fatal internal
 * error; this function does not return.
 */
void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  (void) the_thread;
  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}
0406 
/*
 * Enqueues the thread on the thread queue and blocks it.
 *
 * The caller must hold the queue lock (released here).  The deadlock check
 * runs first; on deadlock, the deadlock callout of the queue context is
 * invoked instead of blocking.  Otherwise the thread is enqueued, the
 * enqueue callout runs (e.g. to start a timeout watchdog), and the thread
 * blocks via the two-phase INTEND_TO_BLOCK/BLOCKED wait flag protocol.
 *
 * @param queue is the thread queue; its lock is held on entry.
 * @param operations are the thread queue operations (enqueue discipline).
 * @param the_thread is the thread to enqueue (may be replaced by a proxy on
 *   multiprocessing configurations).
 * @param queue_context provides the enqueue and deadlock callouts, the
 *   intended thread state, and the lock context.
 */
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Deadlock_status deadlock_status;
  Per_CPU_Control             *cpu_self;
  bool                         success;

  _Assert( queue_context->enqueue_callout != NULL );

#if defined(RTEMS_MULTIPROCESSING)
  /* The MP receive thread enqueues on behalf of remote threads via a proxy. */
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  deadlock_status =
    _Thread_queue_Path_acquire( queue, the_thread, queue_context );

  if ( deadlock_status == THREAD_QUEUE_DEADLOCK_DETECTED ) {
    /* Undo the path locks and the wait claim, then report the deadlock. */
    _Thread_queue_Path_release( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release( queue_context );

  /* Announce the intention to block before dropping the queue lock. */
  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may already changed our state with respect to the thread queue object.
   * The request could be satisfied or timed out.  This situation is indicated
   * by the thread wait flags.  Other parties must not modify our thread state
   * as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread wait state,
   * thus we have to cancel the blocking operation ourself if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    /* The request was already satisfied or timed out; undo the blocking. */
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
0485 
0486 #if defined(RTEMS_SMP)
/*
 * Enqueues the thread on the thread queue without blocking it in the
 * scheduler; instead the thread keeps its sticky scheduler presence and
 * busy waits until its wait flags leave the INTEND_TO_BLOCK state.
 *
 * The caller must hold the queue lock (released here).  A fatal internal
 * error is raised if the thread dispatch disable level is not exactly one
 * after disabling, since busy waiting requires a well-defined state.
 *
 * @return the thread wait status after the wait completed or the deadlock
 *   callout ran.
 */
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Deadlock_status deadlock_status;
  Per_CPU_Control             *cpu_self;

  _Assert( queue_context->enqueue_callout != NULL );

  _Thread_Wait_claim( the_thread, queue );

  deadlock_status =
    _Thread_queue_Path_acquire( queue, the_thread, queue_context );

  if ( deadlock_status == THREAD_QUEUE_DEADLOCK_DETECTED ) {
    /* Undo the path locks and the wait claim, then report the deadlock. */
    _Thread_queue_Path_release( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release( queue_context );

  /* Announce the intention to block before dropping the queue lock. */
  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_Priority_update_and_make_sticky( the_thread );
  _Thread_Dispatch_enable( cpu_self );

  /* Busy wait until the surrender side changes our wait flags. */
  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
0551 #endif
0552 
0553 #if defined(RTEMS_MULTIPROCESSING)
0554 bool _Thread_queue_MP_set_callout(
0555   Thread_Control             *the_thread,
0556   const Thread_queue_Context *queue_context
0557 )
0558 {
0559   Thread_Proxy_control    *the_proxy;
0560   Thread_queue_MP_callout  mp_callout;
0561 
0562   if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
0563     return false;
0564   }
0565 
0566   the_proxy = (Thread_Proxy_control *) the_thread;
0567   mp_callout = queue_context->mp_callout;
0568   _Assert( mp_callout != NULL );
0569   the_proxy->thread_queue_callout = mp_callout;
0570   return true;
0571 }
0572 #endif
0573 
/*
 * Unconditionally sets the thread wait flags to ready and restores the
 * default wait state; used where the caller knows no concurrent wait flag
 * transition is possible.
 */
static void _Thread_queue_Force_ready_again( Thread_Control *the_thread )
{
  /*
   * We must set the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
  _Thread_Wait_restore_default( the_thread );
}
0583 
0584 static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
0585 {
0586   bool success;
0587   bool unblock;
0588 
0589   /*
0590    * We must update the wait flags under protection of the current thread lock,
0591    * otherwise a _Thread_Timeout() running on another processor may interfere.
0592    */
0593   success = _Thread_Wait_flags_try_change_release(
0594     the_thread,
0595     THREAD_QUEUE_INTEND_TO_BLOCK,
0596     THREAD_WAIT_STATE_READY
0597   );
0598   if ( success ) {
0599     unblock = false;
0600   } else {
0601     _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
0602     _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
0603     unblock = true;
0604   }
0605 
0606   _Thread_Wait_restore_default( the_thread );
0607   return unblock;
0608 }
0609 
0610 /*
0611  * This function is used instead of _Thread_queue_Make_ready_again() in
0612  * _Thread_queue_Surrender() and _Thread_queue_Surrender_priority_ceiling()
0613  * since only the previous owner thread is allowed to surrender the thread
0614  * queue.
0615  *
0616  * In uniprocessor configurations, there is only one executing thread (in this
0617  * case the previous owner), so the new owner thread must be fully blocked.
0618  *
0619  * In SMP configurations, the new owner may execute on another processor in
0620  * parallel, so we have to use _Thread_queue_Make_ready_again().
0621  */
/* See the explanatory comment above for why this differs per configuration. */
static bool _Thread_queue_Make_new_owner_ready_again( Thread_Control *new_owner )
{
#if defined(RTEMS_SMP)
  /* The new owner may execute in parallel; use the full transition. */
  return _Thread_queue_Make_ready_again( new_owner );
#else
  /* On uniprocessor configurations, the new owner must be fully blocked. */
  _Assert( _Thread_Wait_flags_get( new_owner ) == THREAD_QUEUE_BLOCKED );
  _Thread_queue_Force_ready_again( new_owner );
  return false;
#endif
}
0632 
/*
 * Removes the timeout timer of the new owner and unblocks it.  On SMP this
 * happens only if _Thread_queue_Make_new_owner_ready_again() requested an
 * unblock; on uniprocessor configurations the new owner was always blocked,
 * so the unblock is unconditional.
 */
static void _Thread_queue_Unblock_new_owner_and_remove_timer(
  Thread_queue_Queue *queue,
  Thread_Control     *new_owner,
  bool                unblock
)
{
#if defined(RTEMS_SMP)
  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }
#else
  (void) unblock;
  _Thread_Remove_timer_and_unblock( new_owner, queue );
#endif
}
0648 
/*
 * Extracts the thread from the thread queue and makes it ready again; the
 * caller must hold the queue lock.
 *
 * @return true if the caller must unblock the thread (it had reached the
 *   blocked state), false otherwise; see
 *   _Thread_queue_Make_ready_again().
 */
bool _Thread_queue_Extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}
0662 
0663 void _Thread_queue_Resume(
0664   Thread_queue_Queue   *queue,
0665   Thread_Control       *the_thread,
0666   Thread_queue_Context *queue_context
0667 )
0668 {
0669   bool unblock;
0670 
0671   unblock = _Thread_queue_Make_ready_again( the_thread );
0672 
0673   if ( unblock ) {
0674     Per_CPU_Control *cpu_self;
0675 
0676     cpu_self = _Thread_queue_Dispatch_disable( queue_context );
0677     _Thread_queue_Queue_release(
0678       queue, &queue_context->Lock_context.Lock_context
0679     );
0680 
0681     _Thread_Remove_timer_and_unblock( the_thread, queue );
0682 
0683     _Thread_Dispatch_enable( cpu_self );
0684   } else {
0685     _Thread_queue_Queue_release(
0686       queue, &queue_context->Lock_context.Lock_context
0687     );
0688   }
0689 }
0690 
/*
 * Surrenders the thread queue from the previous owner to the first thread
 * selected by the surrender operation, which becomes the new owner.
 *
 * The caller must hold the queue lock (released here).  The queue must be
 * non-empty (heads != NULL); use the ownerless variants otherwise.
 *
 * @param queue is the thread queue.
 * @param heads are the queue heads; must not be NULL.
 * @param previous_owner is the thread giving up ownership.
 * @param queue_context provides the lock context and collects priority
 *   updates.
 * @param operations provide the surrender operation of the discipline.
 */
void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

  /*
   * The compound statement below executes unconditionally on
   * non-multiprocessing configurations; on multiprocessing configurations
   * it is skipped for proxies of remote threads, which get the MP callout
   * installed instead.
   */
#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_new_owner_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_queue_Unblock_new_owner_and_remove_timer(
    queue,
    new_owner,
    unblock
  );

  _Thread_Dispatch_enable( cpu_self );
}
0738 
0739 void _Thread_queue_Surrender_no_priority(
0740   Thread_queue_Queue            *queue,
0741   Thread_queue_Heads            *heads,
0742   Thread_queue_Context          *queue_context,
0743   const Thread_queue_Operations *operations
0744 )
0745 {
0746   Thread_Control  *the_thread;
0747 
0748   _Assert( heads != NULL );
0749   _Assert( queue->owner == NULL );
0750 
0751   the_thread = ( *operations->surrender )( queue, heads, NULL, queue_context );
0752 
0753 #if defined(RTEMS_MULTIPROCESSING)
0754   _Thread_queue_MP_set_callout( the_thread, queue_context );
0755 #endif
0756 
0757   _Thread_queue_Resume( queue, the_thread, queue_context );
0758 }
0759 
/*
 * Surrenders a priority ceiling thread queue: removes the ceiling priority
 * from the executing (previous owner) thread and, if the queue is not
 * empty, hands ownership and the ceiling priority to the next waiter.
 *
 * The caller must hold the queue lock (released here).
 *
 * @param queue is the thread queue.
 * @param executing is the previous owner giving up the queue.
 * @param priority_ceiling is the ceiling priority node moved from the
 *   previous owner to the new owner.
 * @param queue_context provides the lock context and collects priority
 *   updates.
 * @param operations provide the surrender operation of the discipline.
 *
 * @retval STATUS_SUCCESSFUL Always; the status return keeps the signature
 *   uniform with other surrender variants.
 */
Status_Control _Thread_queue_Surrender_priority_ceiling(
  Thread_queue_Queue            *queue,
  Thread_Control                *executing,
  Priority_Node                 *priority_ceiling,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  ISR_lock_Context    lock_context;
  Thread_queue_Heads *heads;
  Thread_Control     *new_owner;
  bool                unblock;
  Per_CPU_Control    *cpu_self;

  _Thread_Resource_count_decrement( executing );

  /* Remove the ceiling priority from the previous owner. */
  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_acquire_default_critical( executing, &lock_context );
  _Thread_Priority_remove( executing, priority_ceiling, queue_context );
  _Thread_Wait_release_default_critical( executing, &lock_context );

  heads = queue->heads;
  queue->owner = NULL;

  if ( heads == NULL ) {
    /* No waiters: just apply the pending priority update and return. */
    cpu_self = _Thread_Dispatch_disable_critical(
      &queue_context->Lock_context.Lock_context
    );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Priority_update( queue_context );
    _Thread_Dispatch_enable( cpu_self );
    return STATUS_SUCCESSFUL;
  }

  new_owner = ( *operations->surrender )(
    queue,
    heads,
    NULL,
    queue_context
  );

  queue->owner = new_owner;

  unblock = _Thread_queue_Make_new_owner_ready_again( new_owner );

  /*
   * The compound statement below executes unconditionally on
   * non-multiprocessing configurations; on multiprocessing configurations
   * it is skipped for proxies of remote threads.
   */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
  {
    /* Hand the ceiling priority over to the new owner. */
    _Thread_Resource_count_increment( new_owner );
    _Thread_Wait_acquire_default_critical( new_owner, &lock_context );
    _Thread_Priority_add( new_owner, priority_ceiling, queue_context );
    _Thread_Wait_release_default_critical( new_owner, &lock_context );
  }

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_queue_Unblock_new_owner_and_remove_timer(
    queue,
    new_owner,
    unblock
  );

  _Thread_Dispatch_enable( cpu_self );
  return STATUS_SUCCESSFUL;
}
0834 
0835 #if defined(RTEMS_SMP)
/*
 * Surrenders a thread queue whose waiters were enqueued with
 * _Thread_queue_Enqueue_sticky(): ownership moves to the thread selected by
 * the surrender operation, which busy waits on its wait flags instead of
 * being blocked in the scheduler.
 *
 * The caller must hold the queue lock (released here).  The queue must be
 * non-empty.
 */
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

  /*
   * There is no need to unblock the thread, since in the corresponding
   * _Thread_queue_Enqueue_sticky() the thread is not blocked by the scheduler.
   * Instead, the thread busy waits for a change of its thread wait flags.
   * Timeouts cannot interfere since we hold the thread queue lock.
   */
  _Assert(
    _Thread_Wait_flags_get( new_owner ) == THREAD_QUEUE_INTEND_TO_BLOCK
  );
  _Thread_queue_Force_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  /* Drop the previous owner's sticky presence and update both priorities. */
  _Thread_Priority_update_and_clean_sticky( previous_owner );
  _Thread_Priority_update_ignore_sticky( new_owner );
  _Thread_Dispatch_enable( cpu_self );
}
0878 #endif
0879 
0880 #if defined(RTEMS_MULTIPROCESSING)
0881 void _Thread_queue_Unblock_proxy(
0882   Thread_queue_Queue *queue,
0883   Thread_Control     *the_thread
0884 )
0885 {
0886   const Thread_queue_Object *the_queue_object;
0887   Thread_Proxy_control      *the_proxy;
0888   Thread_queue_MP_callout    mp_callout;
0889 
0890   the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
0891   the_proxy = (Thread_Proxy_control *) the_thread;
0892   mp_callout = the_proxy->thread_queue_callout;
0893   ( *mp_callout )( the_thread, the_queue_object->Object.id );
0894 
0895   _Thread_MP_Free_proxy( the_thread );
0896 }
0897 #endif