/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreHeap
 *
 * @brief This source file contains the implementation of
 *   _Heap_Initialize() and _Heap_Block_allocate().
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (C) 2009, 2010 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/heapimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/interr.h>

#include <string.h>

#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
  #error "invalid CPU_ALIGNMENT value"
#endif
/*
 *  _Heap_Initialize
 *
 *  This kernel routine initializes a heap.
 *
 *  Input parameters:
 *    heap       - pointer to heap header
 *    area_begin - starting address of heap
 *    size       - size of heap
 *    page_size  - allocatable unit of memory
 *
 *  Output parameters:
 *    returns - maximum memory available on success
 *    0       - otherwise
 *
 *  This is what a heap looks like in memory immediately after initialization:
 *
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- first block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = size0              | 1 |
 *         8  +---------------------+----------+ <- aligned on page_size
 *            |  next = HEAP_TAIL   |          |
 *        12  +---------------------+          |
 *            |  prev = HEAP_HEAD   |  memory  |
 *            +---------------------+          |
 *            |                     available  |
 *            |                                |
 *            |                for allocation  |
 *            |                                |
 *     size0  +--------------------------------+ <- last dummy block
 *            |  prev_size = size0             |
 *        +4  +--------------------------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *        +8  +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 *  Below is what a heap looks like after the first allocation of SIZE bytes
 *  using _Heap_Allocate().  BSIZE stands for SIZE + 4 aligned up on a
 *  'page_size' boundary.
 *  [NOTE: If the allocation were performed by _Heap_Allocate_aligned(), the
 *  block size BSIZE would be defined differently, and the previously free
 *  block would be split so that the upper part of it becomes the used block
 *  (see 'heapallocatealigned.c' for details).]
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- used block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = BSIZE              | 1 | <- prev block is used
 *         8  +--------------------------------+ <- aligned on page_size
 *            |              .                 | Pointer returned to the user
 *            |              .                 | is 8 for _Heap_Allocate()
 *            |              .                 | and is in range
 * 8 +        |         user-accessible        | [8,8+page_size) for
 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
 *            |             area               |
 *            |              .                 |
 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
 *            |              .                 |
 * BSIZE  +4  +--------------------------------+
 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
 *            |  next = HEAP_TAIL |            |
 * BSIZE +12  +-------------------+            |
 *            |  prev = HEAP_HEAD |     memory |
 *            +-------------------+            |
 *            |                   .  available |
 *            |                   .            |
 *            |                   .        for |
 *            |                   .            |
 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
 *            |  prev_size = S    |            |
 *       +S+4 +-------------------+------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *       +S+8 +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 */
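
/*
 *  A small worked instance of the picture above (illustrative only; the
 *  field widths assume the 32-bit layout drawn in the diagrams): with
 *  page_size = 16 and a request of SIZE = 20 bytes,
 *
 *    BSIZE = align_up( SIZE + 4, page_size )
 *          = align_up( 24, 16 )
 *          = 32
 *
 *  so the used block spans 32 bytes and the remaining size0 - BSIZE bytes
 *  stay on the free list as the block labeled 'free block' above.
 */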

#ifdef HEAP_PROTECTION
  static void _Heap_Protection_block_initialize_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
    block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
    block->Protection_begin.next_delayed_free_block = NULL;
    block->Protection_begin.task = _Thread_Get_executing();
    block->Protection_begin.tag = NULL;
    block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
    block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
  }

  static void _Heap_Protection_block_check_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    if (
      block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
        || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
        || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
        || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
    ) {
      _Heap_Protection_block_error( heap, block, HEAP_ERROR_BROKEN_PROTECTOR );
    }
  }

  static void _Heap_Protection_block_error_default(
    Heap_Control *heap,
    Heap_Block *block,
    Heap_Error_reason reason
  )
  {
    Heap_Error_context error_context = {
      .heap = heap,
      .block = block,
      .reason = reason
    };

    _Terminate( RTEMS_FATAL_SOURCE_HEAP, (uintptr_t) &error_context );
  }
#endif
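
/*
 *  The protector words above act like canary values around each block.  A
 *  hedged sketch of how a corruption would surface (the 16 and 32 byte
 *  figures are made up for illustration):
 *
 *    void *p = _Heap_Allocate( heap, 16 );
 *    memset( p, 0, 32 );   // writes 16 bytes past the requested area
 *
 *  The overflow eventually overwrites a protector word of a neighboring
 *  block, so a later protection check on that block ends up in
 *  _Heap_Protection_block_error() with HEAP_ERROR_BROKEN_PROTECTOR, which
 *  terminates the system via _Terminate().
 */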

bool _Heap_Get_first_and_last_block(
  uintptr_t heap_area_begin,
  uintptr_t heap_area_size,
  uintptr_t page_size,
  uintptr_t min_block_size,
  Heap_Block **first_block_ptr,
  Heap_Block **last_block_ptr
)
{
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t const alloc_area_begin =
    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
  uintptr_t const first_block_begin =
    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const overhead =
    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  uintptr_t const first_block_size =
    _Heap_Align_down( heap_area_size - overhead, page_size );
  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
  Heap_Block *const last_block =
    _Heap_Block_at( first_block, first_block_size );

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return false;
  }

  *first_block_ptr = first_block;
  *last_block_ptr = last_block;

  return true;
}
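
/*
 *  Usage sketch (not part of this file; the area and page_size values are
 *  made up for illustration): probe whether a memory area can host a heap
 *  before committing to it.
 *
 *    static char area[ 256 ];
 *    Heap_Block *first;
 *    Heap_Block *last;
 *    uintptr_t page_size = CPU_ALIGNMENT;
 *    bool ok = _Heap_Get_first_and_last_block(
 *      (uintptr_t) area,
 *      sizeof( area ),
 *      page_size,
 *      _Heap_Min_block_size( page_size ),
 *      &first,
 *      &last
 *    );
 *
 *  On success, first and last delimit the usable block range exactly as
 *  _Heap_Initialize() below will set them up.
 */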

uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }

  min_block_size = _Heap_Min_block_size( page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  memset(heap, 0, sizeof(*heap));

  #ifdef HEAP_PROTECTION
    heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
    heap->Protection.block_check = _Heap_Protection_block_check_default;
    heap->Protection.block_error = _Heap_Protection_block_error_default;
  #endif

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block */
  first_block->prev_size = heap_area_end;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );
  _Heap_Protection_block_initialize( heap, first_block );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );
  _Heap_Protection_block_initialize( heap, last_block );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;

  _Heap_Protection_set_delayed_free_fraction( heap, 2 );

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
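
/*
 *  Usage sketch (illustrative, not part of this file; the buffer size is
 *  made up): create a heap in a static buffer.  A page_size of zero
 *  selects CPU_ALIGNMENT.
 *
 *    static char area[ 4096 ];
 *    static Heap_Control heap;
 *
 *    uintptr_t avail = _Heap_Initialize( &heap, area, sizeof( area ), 0 );
 *    if ( avail == 0 ) {
 *      // the area was invalid or too small
 *    }
 *
 *  The return value is the size of the first (and only) free block, i.e.
 *  the maximum memory available for allocation.
 */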

static void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *next_block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;

  _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
  _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;
    uintptr_t const next_block_size = _Heap_Block_size( next_block );
    Heap_Block *const next_next_block =
      _Heap_Block_at( next_block, next_block_size );

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_prev_used( next_next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;

    _Heap_Protection_block_initialize( heap, free_block );
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
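
/*
 *  Split decision by example (the concrete values are illustrative and
 *  assume HEAP_BLOCK_HEADER_SIZE == 8 and HEAP_ALLOC_BONUS == 4, as on a
 *  typical 32-bit target): for block_size = 64, page_size = 8,
 *  min_block_size = 16 and alloc_size = 20,
 *
 *    used_size       = max( 20, 16 - 8 ) + 8 = 28
 *    used_block_size = align_up( 28, 8 )     = 32
 *    free_size       = 64 + 4 - 28           = 40
 *    free_size_limit = 16 + 4                = 20
 *
 *  Since free_size >= free_size_limit, the block is split and a free
 *  block of 64 - 32 = 32 bytes remains; otherwise the whole block would
 *  be handed out to avoid creating an unusably small fragment.
 */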

static Heap_Block *_Heap_Block_allocate_from_begin(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *next_block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  _Heap_Block_split( heap, block, next_block, free_list_anchor, alloc_size );

  return block;
}

static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *next_block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = (uintptr_t) next_block - new_block_begin;
  uintptr_t block_size_adjusted = (uintptr_t) new_block - (uintptr_t) block;

  _HAssert( block_size_adjusted >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size_adjusted;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size_adjusted += prev_block_size;
  }

  block->size_and_flag = block_size_adjusted | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size_adjusted;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, next_block, free_list_anchor, alloc_size );

  return new_block;
}

Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
  uintptr_t const block_size = _Heap_Block_size( block );
  Heap_Block *const next_block = _Heap_Block_at( block, block_size );

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_prev_used( next_block ) ) {
    free_list_anchor = _Heap_Free_list_head( heap );
  } else {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= block_size;
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      next_block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      next_block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  _Heap_Protection_block_initialize( heap, block );

  return block;
}
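
/*
 *  Usage sketch (illustrative; the surrounding search loop is omitted,
 *  see heapallocate.c): _Heap_Block_allocate() is the common back end of
 *  the public allocators.  A caller such as _Heap_Allocate() first finds
 *  a free block whose allocation area can satisfy the request and then
 *  commits it:
 *
 *    uintptr_t alloc_begin = _Heap_Alloc_area_of_block( block );
 *    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
 *    return (void *) alloc_begin;
 *
 *  With alloc_begin at the start of the allocation area the request is
 *  served by _Heap_Block_allocate_from_begin(); an aligned allocation
 *  with a larger offset goes through _Heap_Block_allocate_from_end().
 */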