Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:26

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreHeap
0007  *
0008  * @brief This source file contains the implementation of
0009  *   _Heap_Allocate_aligned_with_boundary().
0010  */
0011 
0012 /*
0013  *  COPYRIGHT (c) 1989-1999.
0014  *  On-Line Applications Research Corporation (OAR).
0015  *
0016  *  Copyright (c) 2009 embedded brains GmbH & Co. KG
0017  *
0018  * Redistribution and use in source and binary forms, with or without
0019  * modification, are permitted provided that the following conditions
0020  * are met:
0021  * 1. Redistributions of source code must retain the above copyright
0022  *    notice, this list of conditions and the following disclaimer.
0023  * 2. Redistributions in binary form must reproduce the above copyright
0024  *    notice, this list of conditions and the following disclaimer in the
0025  *    documentation and/or other materials provided with the distribution.
0026  *
0027  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0028  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0029  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0030  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0031  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0032  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0033  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0034  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0035  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0036  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0037  * POSSIBILITY OF SUCH DAMAGE.
0038  */
0039 
0040 #ifdef HAVE_CONFIG_H
0041 #include "config.h"
0042 #endif
0043 
0044 #include <rtems/score/heapimpl.h>
0045 
#ifndef HEAP_PROTECTION
  /* Without heap protection there are never delayed blocks to free */
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a batch of the delayed free blocks of the heap.
   *
   * Only acts when the preceding allocation attempt failed (alloc_begin == 0)
   * and delayed free blocks are available.  In that case a fraction of the
   * delayed blocks (at least one, via ceiling division by
   * delayed_free_fraction) is returned to the heap and true is returned so
   * that the caller can search the free list again.  Otherwise returns false.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    bool search_again = false;
    /* Ceiling of delayed_free_block_count / delayed_free_fraction */
    uintptr_t const blocks_to_free_count =
      (heap->Protection.delayed_free_block_count
         + heap->Protection.delayed_free_fraction - 1)
      / heap->Protection.delayed_free_fraction;

    if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
      Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
      uintptr_t count = 0;

      for ( count = 0; count < blocks_to_free_count; ++count ) {
        Heap_Block *next_block_to_free;

        /* A delayed free block outside the heap indicates corruption */
        if ( !_Heap_Is_block_in_heap( heap, block_to_free ) ) {
          _Heap_Protection_block_error(
            heap,
            block_to_free,
            HEAP_ERROR_BAD_FREE_BLOCK
          );
        }

        /*
         * Unlink the block from the delayed free list and mark it with the
         * obolus before handing it to _Heap_Free().
         */
        next_block_to_free =
          block_to_free->Protection_begin.next_delayed_free_block;
        block_to_free->Protection_begin.next_delayed_free_block =
          HEAP_PROTECTION_OBOLUS;

        _Heap_Free(
          heap,
          (void *) _Heap_Alloc_area_of_block( block_to_free )
        );

        block_to_free = next_block_to_free;
      }

      heap->Protection.delayed_free_block_count -= blocks_to_free_count;
      heap->Protection.first_delayed_free_block = block_to_free;

      search_again = true;
    }

    return search_again;
  }

  /*
   * Drains the entire delayed free list of the heap by repeatedly freeing
   * batches until no delayed blocks remain.
   */
  void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
  {
    bool search_again;

    do {
      search_again = _Heap_Protection_free_delayed_blocks( heap, 0 );
    } while ( search_again );
  }
#endif
0106 
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Verifies via _HAssert() that a just performed allocation is consistent:
   * the block is well formed and page aligned, the allocated area lies within
   * the block (plus the allocation bonus), the requested alignment holds, and
   * a non-zero boundary is not crossed by the allocated area.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const page = heap->page_size;
    uintptr_t const begin_of_block = (uintptr_t) block;
    uintptr_t const size_of_block = _Heap_Block_size( block );
    uintptr_t const end_of_block = begin_of_block + size_of_block;
    uintptr_t const alloc_end = alloc_begin + alloc_size;
    uintptr_t const area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const area_offset = alloc_begin - area_begin;

    /* Block shape and alignment */
    _HAssert( size_of_block >= heap->min_block_size );
    _HAssert( begin_of_block < end_of_block );
    _HAssert(
      _Heap_Is_aligned( begin_of_block + HEAP_BLOCK_HEADER_SIZE, page )
    );
    _HAssert(
      _Heap_Is_aligned( size_of_block, page )
    );

    /* Allocated area lies within the block */
    _HAssert( alloc_end <= end_of_block + HEAP_ALLOC_BONUS );
    _HAssert( area_begin == begin_of_block + HEAP_BLOCK_HEADER_SIZE );
    _HAssert( area_offset < page );

    /* Alignment of the returned address */
    _HAssert( _Heap_Is_aligned( area_begin, page ) );
    if ( alignment == 0 ) {
      _HAssert( alloc_begin == area_begin );
    } else {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    }

    /* Boundary: the area must not straddle a boundary line */
    if ( boundary != 0 ) {
      uintptr_t const boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
0159 
/*
 * Checks whether the free block can satisfy an allocation of alloc_size
 * bytes honoring the requested alignment and, if non-zero, the boundary
 * constraint.  Returns the allocation begin address on success, or 0 if
 * the block cannot satisfy the request.  The address is chosen from the
 * top of the block downwards so that a valid free block can remain at the
 * beginning if the block is split.
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  /* Lowest usable begin address and highest begin address that still
     leaves room for a valid block at the end after alignment down */
  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start with the topmost possible area (bonus included) */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /* Move the area below each crossing boundary line until it fits or
       drops below the lowest feasible position */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    /* Any leading remainder must be either nothing or a full block */
    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
0220 
/*
 * Allocates alloc_size bytes from the heap with an optional alignment and
 * an optional boundary (the allocated area must not cross an address that
 * is a multiple of boundary).  Returns the begin address of the allocated
 * area, or NULL if the request cannot be satisfied.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /* Minimum block size_and_flag value that can hold the request */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    /* An area larger than the boundary must cross it */
    if ( boundary < alloc_size ) {
      return NULL;
    }

    /* A boundary constraint implies at least page alignment */
    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  /*
   * First-fit search of the free list; with heap protection enabled a
   * failed pass may free delayed blocks and trigger another pass.
   */
  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Split/mark the block as used; may return a different block */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}