/**
 * @file
 *
 * @ingroup RTEMSImplClassicCache
 *
 * @brief This header file contains the implementation of the
 *   @ref RTEMSAPIClassicCache.
 */

/*
 *  Cache Manager
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH & Co. KG
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/**
 * @file
 *
 * The functions in this file implement the API to the
 * @ref RTEMSAPIClassicCache.  This file is intended to be included in a cache
 * implementation source file provided by the architecture or BSP, e.g.
 *
 *  - bsps/${RTEMS_CPU}/shared/cache/cache.c
 *  - bsps/${RTEMS_CPU}/${RTEMS_BSP_FAMILY}/start/cache.c
 *
 * In the cache implementation source file, a couple of defines and inline
 * functions may be provided and afterwards this file is included, e.g.
 *
 *  @code
 *  #define CPU_DATA_CACHE_ALIGNMENT XYZ
 *  ...
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 *  @endcode
 *
 * A more complete, purely illustrative sketch of such a file is given after
 * this comment block.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_DATA_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *  @endcode
 *
 * to enable the data cache support.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *  @endcode
 *
 * to enable the instruction cache support.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *  @endcode
 *
 * if it provides cache maintenance functions which operate on multiple lines.
 * Otherwise a generic loop with single line operations will be used.  It is
 * strongly recommended to provide the implementation in terms of static inline
 * functions for performance reasons.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
 *  @endcode
 *
 * if it provides functions to get the data and instruction cache sizes by
 * level.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION
 *  @endcode
 *
 * if special instructions must be used to synchronize the instruction caches
 * after a code change.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA
 *  @endcode
 *
 * if an external implementation of rtems_cache_disable_data() is provided,
 * e.g. as an implementation in assembly code.
 *
 * The cache implementation source file shall define
 *
 *  @code
 *  #define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
 *  @endcode
 *
 * if the hardware provides no instruction cache snooping and the instruction
 * cache invalidation needs software support.
 *
 * The functions below are implemented with inline routines found in the cache
 * implementation source file for each architecture or BSP.  In the event that
 * support for a specific function of a cache is not provided, the API routine
 * does nothing (but does exist).
 */

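/*
 * Illustrative sketch only: the alignment values and the bodies of the single
 * line operations below are hypothetical placeholders and are not taken from
 * any particular architecture.  A minimal cache implementation source file
 * which relies on the generic single line loops could be structured as
 * follows:
 *
 *  @code
 *  #define CPU_DATA_CACHE_ALIGNMENT 32
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
 *
 *  static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
 *  {
 *    // Issue the CPU-specific "flush one data cache line" instruction here.
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_data_line( const void *d_addr )
 *  {
 *    // Issue the CPU-specific "invalidate one data cache line" instruction
 *    // here.
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_instruction_line(
 *    const void *i_addr
 *  )
 *  {
 *    // Issue the CPU-specific "invalidate one instruction cache line"
 *    // instruction here.
 *  }
 *
 *  ...
 *
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 *  @endcode
 */
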
#include <rtems.h>

#include <sys/param.h>

#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
#include <rtems/score/smpimpl.h>
#include <rtems/score/threaddispatch.h>
#endif

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

/**
 * @defgroup RTEMSImplClassicCache Cache Manager
 *
 * @ingroup RTEMSImplClassic
 *
 * @brief This group contains the Cache Manager implementation.
 */

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}

/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */
void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}

/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  /*
   * Call the CPU-specific routine
   */
  _CPU_cache_flush_entire_data();
#endif
}

/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  /*
   * Call the CPU-specific routine
   */
  _CPU_cache_invalidate_entire_data();
#endif
}

/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}

void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}

void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}

#if !defined(CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA)
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  (void) arg;
  _CPU_cache_invalidate_entire_instruction();
}

static void smp_cache_broadcast( SMP_Action_handler handler, void *arg )
{
  uint32_t         isr_level;
  Per_CPU_Control *cpu_self;

  isr_level = _ISR_Get_level();

  if ( isr_level == 0 ) {
    cpu_self = _Thread_Dispatch_disable();
  } else {
    cpu_self = _Per_CPU_Get();
  }

  ( *handler )( arg );
  _SMP_Othercast_action( handler, arg );

  if ( isr_level == 0 ) {
    _Thread_Dispatch_enable( cpu_self );
  }
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  smp_cache_broadcast( smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_broadcast( smp_cache_inst_inv_all, NULL );
#else
  _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}

/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}

void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}

void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}

void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}

/* Returns the maximal cache line size of all cache kinds in bytes. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#endif
  size_t data_line_size =
#if defined(CPU_DATA_CACHE_ALIGNMENT)
    CPU_DATA_CACHE_ALIGNMENT;
#else
    0;
#endif
  size_t instruction_line_size =
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
    CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
    0;
#endif
  return MAX( data_line_size, instruction_line_size );
}

/*
 * The purpose of this function is to synchronize the caches after code has
 * been loaded or self-modified.  The current implementation is simple; it can
 * and should be replaced by an optimized version which does not need to flush
 * and invalidate all cache levels when the code is changed.
 */
void rtems_cache_instruction_sync_after_code_change(
  const void *code_addr,
  size_t      n_bytes
)
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
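
/*
 * Usage sketch (application side, not part of this file): after new machine
 * code has been copied into a buffer, the instruction stream can be made
 * coherent with the data cache as shown below.  The buffer and size names are
 * hypothetical.
 *
 *  @code
 *  memcpy( code_buffer, new_code, new_code_size );
 *  rtems_cache_instruction_sync_after_code_change( code_buffer, new_code_size );
 *  @endcode
 */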