/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsARMShared
 *
 * @brief This header file provides interfaces of the ARM CP15 cache controller
 *   support.
 */

/*
 * Copyright (c) 2014 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef LIBBSP_ARM_SHARED_CACHE_L1_H
#define LIBBSP_ARM_SHARED_CACHE_L1_H

#include <bsp.h>
#include <libcpu/arm-cp15.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* These two defines also ensure that the rtems_cache_* functions have bodies */
#define ARM_CACHE_L1_CPU_DATA_ALIGNMENT 32
#define ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT 32

#define ARM_CACHE_L1_CSS_ID_DATA \
          (ARM_CP15_CACHE_CSS_ID_DATA | ARM_CP15_CACHE_CSS_LEVEL(0))
#define ARM_CACHE_L1_CSS_ID_INSTRUCTION \
          (ARM_CP15_CACHE_CSS_ID_INSTRUCTION | ARM_CP15_CACHE_CSS_LEVEL(0))
#define ARM_CACHE_L1_DATA_LINE_MASK ( ARM_CACHE_L1_CPU_DATA_ALIGNMENT - 1 )
#define ARM_CACHE_L1_INSTRUCTION_LINE_MASK \
  ( ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT - 1 )

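/*
 * Illustrative sketch (not part of the original header): the mask constants
 * above are typically used to align an arbitrary address down to the start of
 * its cache line, which is what the range based maintenance helpers further
 * below do internally.  The helper name is hypothetical.
 */
static inline uint32_t arm_cache_l1_example_data_line_start( const void *addr )
{
  /* Clear the offset bits so the address refers to the start of its line */
  return (uint32_t) addr & ~ARM_CACHE_L1_DATA_LINE_MASK;
}
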
/*
 * Errata handlers: the handler below issues an additional data
 * synchronization barrier before cache maintenance by address range in SMP
 * configurations, as a workaround for ARM Cortex-A9 MPCore erratum 764369.
 */
static void arm_cache_l1_errata_764369_handler( void )
{
#ifdef RTEMS_SMP
  _ARM_Data_synchronization_barrier();
#endif
}

/**
 * @brief Reads the cache properties for the specified cache level.
 *
 * @param l1LineSize      Receives the cache line size as a power of two
 *                        value, that is log2 of the line size in bytes.
 * @param l1Associativity Receives the associativity of the cache.  The
 *                        associativity does not have to be a power of two.
 * @param l1NumSets       Receives the number of sets in the cache.
 * @param level_and_inst_dat Selects the cache level and whether the data or
 *                        the instruction cache is queried, for example
 *                        ARM_CACHE_L1_CSS_ID_DATA or
 *                        ARM_CACHE_L1_CSS_ID_INSTRUCTION.
 */
static inline void arm_cache_l1_properties_for_level(
  uint32_t *l1LineSize,
  uint32_t *l1Associativity,
  uint32_t *l1NumSets,
  uint32_t level_and_inst_dat
)
{
  uint32_t ccsidr;

  ccsidr = arm_cp15_get_cache_size_id_for_level(level_and_inst_dat);

  /* Line size as log2 of the bytes per line (log2 of words per line plus 2) */
  *l1LineSize      = arm_ccsidr_get_line_power(ccsidr);
  /* Number of Ways */
  *l1Associativity = arm_ccsidr_get_associativity(ccsidr);
  /* Number of Sets */
  *l1NumSets       = arm_ccsidr_get_num_sets(ccsidr);
}

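/*
 * Illustrative sketch (not part of the original header): how the properties
 * reported above translate into byte quantities.  The helper name is
 * hypothetical; arm_cache_l1_get_data_cache_size() further below applies the
 * same arithmetic to compute the total cache size.
 */
static inline uint32_t arm_cache_l1_example_data_line_size_in_bytes( void )
{
  uint32_t line_power    = 0;
  uint32_t associativity = 0;
  uint32_t num_sets      = 0;

  arm_cache_l1_properties_for_level(
    &line_power,
    &associativity,
    &num_sets,
    ARM_CACHE_L1_CSS_ID_DATA
  );

  /* The line size is reported as log2 of the size in bytes */
  return 1u << line_power;
}
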
/**
 * @brief Builds the operand for a cache maintenance operation by set and way.
 *
 * @param log_2_line_bytes The number of bytes per cache line expressed as
 *                         log2.
 * @param associativity    The associativity of the cache being operated on.
 * @param cache_level_idx  The level of the cache being operated on minus one,
 *                         e.g. 0 for cache level 1.
 * @param set              Number of the set to operate on.
 * @param way              Number of the way to operate on.
 *
 * @return The combined set, way and level operand.
 */
static inline uint32_t arm_cache_l1_get_set_way_param(
  const uint32_t log_2_line_bytes,
  const uint32_t associativity,
  const uint32_t cache_level_idx,
  const uint32_t set,
  const uint32_t way )
{
  uint32_t way_shift = __builtin_clz( associativity - 1 );

  return ( way << way_shift )
    | ( set << log_2_line_bytes )
    | ( cache_level_idx << 1 );
}

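/*
 * Worked example (illustrative, not part of the original header): for a
 * hypothetical 4-way L1 data cache with 32-byte lines, way_shift is
 * __builtin_clz( 4 - 1 ) = 30 on the 32-bit ARM targets, so the way index
 * ends up in bits [31:30], the set index is shifted by log2( 32 ) = 5, and
 * the cache level minus one occupies bits [3:1].  This matches the operand
 * format of the ARMv7 cache maintenance operations by set and way.
 */
static inline uint32_t arm_cache_l1_example_set_way_word(
  uint32_t set,
  uint32_t way
)
{
  /* log2 of the 32-byte line is 5, 4 ways, cache level 1 (index 0) */
  return arm_cache_l1_get_set_way_param( 5, 4, 0, set, way );
}
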
static inline void arm_cache_l1_flush_1_data_line( const void *d_addr )
{
  /* Flush the Data cache */
  arm_cp15_data_cache_clean_and_invalidate_line( d_addr );

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_flush_entire_data( void )
{
  uint32_t l1LineSize, l1Associativity, l1NumSets;
  uint32_t s, w;
  uint32_t set_way_param;

  /* ensure ordering with previous memory accesses */
  _ARM_Data_memory_barrier();

  /* Get the L1 cache properties */
  arm_cache_l1_properties_for_level( &l1LineSize,
                    &l1Associativity, &l1NumSets,
                    ARM_CACHE_L1_CSS_ID_DATA);

  for ( w = 0; w < l1Associativity; ++w ) {
    for ( s = 0; s < l1NumSets; ++s ) {
      set_way_param = arm_cache_l1_get_set_way_param(
        l1LineSize,
        l1Associativity,
        0,
        s,
        w
        );
      arm_cp15_data_cache_clean_line_by_set_and_way( set_way_param );
    }
  }

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_invalidate_entire_data( void )
{
  uint32_t l1LineSize, l1Associativity, l1NumSets;
  uint32_t s, w;
  uint32_t set_way_param;

  /* ensure ordering with previous memory accesses */
  _ARM_Data_memory_barrier();

  /* Get the L1 cache properties */
  arm_cache_l1_properties_for_level( &l1LineSize,
                    &l1Associativity, &l1NumSets,
                    ARM_CACHE_L1_CSS_ID_DATA);

  for ( w = 0; w < l1Associativity; ++w ) {
    for ( s = 0; s < l1NumSets; ++s ) {
      set_way_param = arm_cache_l1_get_set_way_param(
        l1LineSize,
        l1Associativity,
        0,
        s,
        w
        );
      arm_cp15_data_cache_invalidate_line_by_set_and_way( set_way_param );
    }
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_clean_and_invalidate_entire_data( void )
{
  uint32_t l1LineSize, l1Associativity, l1NumSets;
  uint32_t s, w;
  uint32_t set_way_param;

  /* ensure ordering with previous memory accesses */
  _ARM_Data_memory_barrier();

  /* Get the L1 cache properties */
  arm_cache_l1_properties_for_level( &l1LineSize,
                    &l1Associativity, &l1NumSets,
                    ARM_CACHE_L1_CSS_ID_DATA);

  for ( w = 0; w < l1Associativity; ++w ) {
    for ( s = 0; s < l1NumSets; ++s ) {
      set_way_param = arm_cache_l1_get_set_way_param(
        l1LineSize,
        l1Associativity,
        0,
        s,
        w
        );
      arm_cp15_data_cache_clean_and_invalidate_line_by_set_and_way(
        set_way_param );
    }
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_flush_data_range(
  const void *d_addr,
  size_t      n_bytes
)
{
  if ( n_bytes != 0 ) {
    uint32_t       adx       = (uint32_t) d_addr
                               & ~ARM_CACHE_L1_DATA_LINE_MASK;
    const uint32_t ADDR_LAST =
      (uint32_t)( (size_t) d_addr + n_bytes - 1 );

    arm_cache_l1_errata_764369_handler();

    for (; adx <= ADDR_LAST; adx += ARM_CACHE_L1_CPU_DATA_ALIGNMENT ) {
      /* Store and invalidate the Data cache line */
      arm_cp15_data_cache_clean_and_invalidate_line( (void*)adx );
    }
    /* Wait for L1 store to complete */
    _ARM_Data_synchronization_barrier();
  }
}

static inline void arm_cache_l1_invalidate_1_data_line(
  const void *d_addr )
{
  /* Invalidate the data cache line */
  arm_cp15_data_cache_invalidate_line( d_addr );

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_freeze_data( void )
{
  /* To be implemented as needed, if supported by hardware at all */
}

static inline void arm_cache_l1_unfreeze_data( void )
{
  /* To be implemented as needed, if supported by hardware at all */
}

static inline void arm_cache_l1_invalidate_1_instruction_line(
  const void *i_addr )
{
  /* Invalidate the Instruction cache line */
  arm_cp15_instruction_cache_invalidate_line( i_addr );

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void arm_cache_l1_invalidate_data_range(
  const void *d_addr,
  size_t      n_bytes
)
{
  if ( n_bytes != 0 ) {
    uint32_t       adx = (uint32_t) d_addr
                         & ~ARM_CACHE_L1_DATA_LINE_MASK;
    const uint32_t end =
      (uint32_t)( (size_t) d_addr + n_bytes - 1 );

    arm_cache_l1_errata_764369_handler();

    /* Back starting address up to start of a line and invalidate until end */
    for (; adx <= end; adx += ARM_CACHE_L1_CPU_DATA_ALIGNMENT ) {
      /* Invalidate the data cache line */
      arm_cp15_data_cache_invalidate_line( (void*)adx );
    }
    /* Wait for L1 invalidate to complete */
    _ARM_Data_synchronization_barrier();
  }
}

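/*
 * Illustrative sketch (not part of the original header): a common pattern for
 * a buffer that a DMA master writes to.  The helper names, the buffer and the
 * transfer management are hypothetical, and only the L1 maintenance provided
 * by this header is shown; an outer cache, if present, needs separate
 * handling.
 */
static inline void arm_cache_l1_example_before_dma_write_to_memory(
  const void *buffer,
  size_t      n_bytes
)
{
  /*
   * Write back and drop any lines the CPU still holds for the buffer, so a
   * later eviction cannot overwrite the data produced by the device.
   */
  arm_cache_l1_flush_data_range( buffer, n_bytes );
}

static inline void arm_cache_l1_example_after_dma_write_to_memory(
  const void *buffer,
  size_t      n_bytes
)
{
  /* Discard stale cached copies before the CPU reads the new data */
  arm_cache_l1_invalidate_data_range( buffer, n_bytes );
}
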
static inline void arm_cache_l1_invalidate_instruction_range(
  const void *i_addr,
  size_t      n_bytes
)
{
  if ( n_bytes != 0 ) {
    uint32_t       adx = (uint32_t) i_addr
                         & ~ARM_CACHE_L1_INSTRUCTION_LINE_MASK;
    const uint32_t end =
      (uint32_t)( (size_t) i_addr + n_bytes - 1 );

    arm_cache_l1_errata_764369_handler();

    /* Back starting address up to start of a line and invalidate until end */
    for (; adx <= end; adx += ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT ) {
      /* Invalidate the Instruction cache line */
      arm_cp15_instruction_cache_invalidate_line( (void*)adx );
    }
    /* Wait for L1 invalidate to complete */
    _ARM_Data_synchronization_barrier();
  }
}

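/*
 * Illustrative sketch (not part of the original header): after the CPU has
 * written instructions to memory, for example in a code loader, the new
 * instructions have to be written back from the L1 data cache and removed
 * from the L1 instruction cache before they are executed.  The helper name is
 * hypothetical; branch predictor and outer cache maintenance are not shown.
 */
static inline void arm_cache_l1_example_sync_new_code(
  const void *code,
  size_t      n_bytes
)
{
  /* Push the newly written instructions out of the L1 data cache */
  arm_cache_l1_flush_data_range( code, n_bytes );

  /* Drop any stale instructions the L1 instruction cache may still hold */
  arm_cache_l1_invalidate_instruction_range( code, n_bytes );
}
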
static inline void arm_cache_l1_invalidate_entire_instruction( void )
{
  uint32_t ctrl = arm_cp15_get_control();

  #ifdef RTEMS_SMP

  /* invalidate I-cache inner shareable */
  arm_cp15_instruction_cache_inner_shareable_invalidate_all();

  /* I+BTB cache invalidate */
  arm_cp15_instruction_cache_invalidate();
  #else /* RTEMS_SMP */
  /* I+BTB cache invalidate */
  arm_cp15_instruction_cache_invalidate();
  #endif /* RTEMS_SMP */

  if ( ( ctrl & ARM_CP15_CTRL_Z ) != 0 ) {
    #if defined(__ARM_ARCH_7A__)
    arm_cp15_branch_predictor_inner_shareable_invalidate_all();
    #endif
    #if defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_7A__)
    arm_cp15_branch_predictor_invalidate_all();
    #endif
  }
}

static inline void arm_cache_l1_freeze_instruction( void )
{
  /* To be implemented as needed, if supported by hardware at all */
}

static inline void arm_cache_l1_unfreeze_instruction( void )
{
  /* To be implemented as needed, if supported by hardware at all */
}

static inline size_t arm_cache_l1_get_data_cache_size( void )
{
  size_t   size;
  uint32_t line_size     = 0;
  uint32_t associativity = 0;
  uint32_t num_sets      = 0;

  arm_cache_l1_properties_for_level( &line_size,
                    &associativity, &num_sets,
                    ARM_CACHE_L1_CSS_ID_DATA);

  size = (1 << line_size) * associativity * num_sets;

  return size;
}

static inline size_t arm_cache_l1_get_instruction_cache_size( void )
{
  size_t   size;
  uint32_t line_size     = 0;
  uint32_t associativity = 0;
  uint32_t num_sets      = 0;

  arm_cache_l1_properties_for_level( &line_size,
                    &associativity, &num_sets,
                    ARM_CACHE_L1_CSS_ID_INSTRUCTION);

  size = (1 << line_size) * associativity * num_sets;

  return size;
}

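/*
 * Illustrative sketch (not part of the original header): a caller that has to
 * clean and invalidate a large buffer can use the size helper above to decide
 * whether walking the range line by line is worthwhile.  The helper name and
 * the heuristic are hypothetical; note that maintenance by set and way only
 * acts on the cache of the executing processor.
 */
static inline void arm_cache_l1_example_flush_large_buffer(
  const void *buffer,
  size_t      n_bytes
)
{
  if ( n_bytes >= arm_cache_l1_get_data_cache_size() ) {
    /* The range covers at least the whole L1 data cache */
    arm_cache_l1_clean_and_invalidate_entire_data();
  } else {
    arm_cache_l1_flush_data_range( buffer, n_bytes );
  }
}
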
#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* LIBBSP_ARM_SHARED_CACHE_L1_H */