Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:23:51

0001 /*
0002  * COPYRIGHT (c) 2014, 2016 ÅAC Microtec AB <www.aacmicrotec.com>
0003  * Contributor(s):
0004  *  Karol Gugala <kgugala@antmicro.com>
0005  *  Martin Werner <martin.werner@aacmicrotec.com>
0006  *
0007  * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
0008  *
0009  * COPYRIGHT (c) 1989-2006
0010  * On-Line Applications Research Corporation (OAR).
0011  *
0012  * The license and distribution terms for this file may be
0013  * found in the file LICENSE in this distribution or at
0014  * http://www.rtems.org/license/LICENSE.
0015  */
0016 
0017 #include <rtems/score/cpu.h>
0018 #include <rtems/score/interr.h>
0019 #include <rtems/score/or1k-utility.h>
0020 #include <rtems/score/percpu.h>
0021 
0022 #define CPU_DATA_CACHE_ALIGNMENT        32
0023 #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
0024 
0025 #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS 1
0026 #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS 1
0027 
static inline size_t
_CPU_cache_get_data_cache_size( const uint32_t level )
{
  /*
   * A single 8 KiB data cache is present.  Both level 0 (the RTEMS
   * "current level" convention) and level 1 refer to it; deeper levels
   * do not exist and report a size of zero.
   */
  if ( level > 1 ) {
    return 0;
  }

  return 8192;
}
0033 
static inline size_t
_CPU_cache_get_instruction_cache_size( const uint32_t level )
{
  /* Single 8 KiB instruction cache; any level beyond the first reports 0. */
  return level <= 1 ? 8192 : 0;
}
0039 
0040 static inline void _CPU_OR1K_Cache_data_block_prefetch(const void *d_addr)
0041 {
0042   ISR_Level level;
0043 
0044   _ISR_Local_disable (level);
0045 
0046   _OR1K_mtspr(CPU_OR1K_SPR_DCBPR, (uintptr_t) d_addr);
0047 
0048   _ISR_Local_enable(level);
0049 }
0050 
0051 static inline void _CPU_OR1K_Cache_data_block_writeback(const void *d_addr)
0052 {
0053   ISR_Level level;
0054 
0055   _ISR_Local_disable (level);
0056 
0057   _OR1K_mtspr(CPU_OR1K_SPR_DCBWR, (uintptr_t) d_addr);
0058 
0059   _ISR_Local_enable(level);
0060 }
0061 
0062 static inline void _CPU_OR1K_Cache_data_block_lock(const void *d_addr)
0063 {
0064   ISR_Level level;
0065 
0066   _ISR_Local_disable (level);
0067 
0068   _OR1K_mtspr(CPU_OR1K_SPR_DCBLR, (uintptr_t) d_addr);
0069 
0070   _ISR_Local_enable(level);
0071 }
0072 
0073 static inline void _CPU_OR1K_Cache_instruction_block_prefetch
0074 (const void *d_addr)
0075 {
0076   ISR_Level level;
0077 
0078   _ISR_Local_disable (level);
0079 
0080   _OR1K_mtspr(CPU_OR1K_SPR_ICBPR, (uintptr_t) d_addr);
0081 
0082   _ISR_Local_enable(level);
0083 }
0084 
0085 static inline void _CPU_OR1K_Cache_instruction_block_lock
0086 (const void *d_addr)
0087 {
0088   ISR_Level level;
0089 
0090   _ISR_Local_disable (level);
0091 
0092   _OR1K_mtspr(CPU_OR1K_SPR_ICBLR, (uintptr_t) d_addr);
0093 
0094   _ISR_Local_enable(level);
0095 }
0096 
0097 /* Implement RTEMS cache manager functions */
0098 
static void _CPU_cache_freeze_data(void)
{
  /* Data cache freezing is not supported on this target; intentional no-op. */
}
0103 
static void _CPU_cache_unfreeze_data(void)
{
  /* Data cache freezing is not supported on this target; intentional no-op. */
}
0108 
static void _CPU_cache_freeze_instruction(void)
{
  /* Instruction cache freezing is not supported here; intentional no-op. */
}
0113 
static void _CPU_cache_unfreeze_instruction(void)
{
  /* Instruction cache freezing is not supported here; intentional no-op. */
}
0118 
0119 static void _CPU_cache_flush_entire_data(void)
0120 {
0121   size_t addr;
0122   ISR_Level level;
0123 
0124   _ISR_Local_disable (level);
0125 
0126   /* We have only 0 level cache so we do not need to invalidate others */
0127   for (
0128       addr = _CPU_cache_get_data_cache_size(0);
0129       addr > 0;
0130       addr -= CPU_DATA_CACHE_ALIGNMENT
0131   ) {
0132     _OR1K_mtspr(CPU_OR1K_SPR_DCBFR, (uintptr_t) addr);
0133   }
0134 
0135   _ISR_Local_enable (level);
0136 }
0137 
0138 static void _CPU_cache_invalidate_entire_data(void)
0139 {
0140   size_t addr;
0141   ISR_Level level;
0142 
0143   _ISR_Local_disable (level);
0144 
0145   /* We have only 0 level cache so we do not need to invalidate others */
0146   for (
0147       addr = _CPU_cache_get_data_cache_size(0);
0148       addr > 0;
0149       addr -= CPU_DATA_CACHE_ALIGNMENT
0150   ) {
0151     _OR1K_mtspr(CPU_OR1K_SPR_DCBIR, (uintptr_t) addr);
0152   }
0153 
0154   _ISR_Local_enable (level);
0155 }
0156 
0157 static void _CPU_cache_invalidate_entire_instruction(void)
0158 {
0159   size_t addr;
0160   ISR_Level level;
0161 
0162   _ISR_Local_disable (level);
0163 
0164   /* We have only 0 level cache so we do not need to invalidate others */
0165   for (
0166       addr = _CPU_cache_get_instruction_cache_size(0);
0167       addr > 0;
0168       addr -= CPU_INSTRUCTION_CACHE_ALIGNMENT
0169   ) {
0170     _OR1K_mtspr(CPU_OR1K_SPR_ICBIR, (uintptr_t) addr);
0171   }
0172 
0173   /* Flush instructions out of instruction buffer */
0174   __asm__ volatile("l.nop");
0175   __asm__ volatile("l.nop");
0176   __asm__ volatile("l.nop");
0177   __asm__ volatile("l.nop");
0178   __asm__ volatile("l.nop");
0179 
0180   _ISR_Local_enable (level);
0181 }
0182 
/*
 * The range functions are copied almost verbatim from the generic
 * implementations in c/src/lib/libcpu/shared/src/cache_manager.c. The main
 * modification here is avoiding repeated off/on toggling of the ISR for each
 * cache line operation.
 */
0189 
0190 static void _CPU_cache_flush_data_range(const void *d_addr, size_t n_bytes)
0191 {
0192   const void * final_address;
0193   ISR_Level level;
0194 
0195  /*
0196   * Set d_addr to the beginning of the cache line; final_address indicates
0197   * the last address_t which needs to be pushed. Increment d_addr and push
0198   * the resulting line until final_address is passed.
0199   */
0200 
0201   if( n_bytes == 0 )
0202     /* Do nothing if number of bytes to flush is zero */
0203     return;
0204 
0205   final_address = (void *)((size_t)d_addr + n_bytes - 1);
0206   d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
0207 
0208   if( final_address - d_addr > _CPU_cache_get_data_cache_size(0) ) {
0209     /*
0210      * Avoid iterating over the whole cache multiple times if the range is
0211      * larger than the cache size.
0212      */
0213     _CPU_cache_flush_entire_data();
0214     return;
0215   }
0216 
0217   _ISR_Local_disable (level);
0218 
0219   while( d_addr <= final_address )  {
0220     _OR1K_mtspr(CPU_OR1K_SPR_DCBFR, (uintptr_t) d_addr);
0221     d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
0222   }
0223 
0224   _ISR_Local_enable (level);
0225 }
0226 
0227 static void _CPU_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
0228 {
0229   const void * final_address;
0230   ISR_Level level;
0231 
0232  /*
0233   * Set d_addr to the beginning of the cache line; final_address indicates
0234   * the last address_t which needs to be pushed. Increment d_addr and push
0235   * the resulting line until final_address is passed.
0236   */
0237 
0238   if( n_bytes == 0 )
0239     /* Do nothing if number of bytes to flush is zero */
0240     return;
0241 
0242   final_address = (void *)((size_t)d_addr + n_bytes - 1);
0243   d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
0244 
0245   if( final_address - d_addr > _CPU_cache_get_data_cache_size(0) ) {
0246     /*
0247      * Avoid iterating over the whole cache multiple times if the range is
0248      * larger than the cache size.
0249      */
0250     _CPU_cache_invalidate_entire_data();
0251     return;
0252   }
0253 
0254   _ISR_Local_disable (level);
0255 
0256   while( d_addr <= final_address )  {
0257     _OR1K_mtspr(CPU_OR1K_SPR_DCBIR, (uintptr_t) d_addr);
0258     d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
0259   }
0260 
0261   _ISR_Local_enable (level);
0262 }
0263 
0264 static void _CPU_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
0265 {
0266   const void * final_address;
0267   ISR_Level level;
0268 
0269  /*
0270   * Set i_addr to the beginning of the cache line; final_address indicates
0271   * the last address_t which needs to be pushed. Increment i_addr and push
0272   * the resulting line until final_address is passed.
0273   */
0274 
0275   if( n_bytes == 0 )
0276     /* Do nothing if number of bytes to flush is zero */
0277     return;
0278 
0279   final_address = (void *)((size_t)i_addr + n_bytes - 1);
0280   i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
0281 
0282   if( final_address - i_addr > _CPU_cache_get_data_cache_size(0) ) {
0283     /*
0284      * Avoid iterating over the whole cache multiple times if the range is
0285      * larger than the cache size.
0286      */
0287     _CPU_cache_invalidate_entire_instruction();
0288     return;
0289   }
0290 
0291   _ISR_Local_disable (level);
0292 
0293   while( i_addr <= final_address )  {
0294     _OR1K_mtspr(CPU_OR1K_SPR_ICBIR, (uintptr_t) i_addr);
0295     i_addr = (void *)((size_t)i_addr + CPU_DATA_CACHE_ALIGNMENT);
0296   }
0297 
0298   _ISR_Local_enable (level);
0299 }
0300 
0301 static void _CPU_cache_enable_data(void)
0302 {
0303   uint32_t sr;
0304   ISR_Level level;
0305 
0306   _ISR_Local_disable (level);
0307 
0308   sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
0309   _OR1K_mtspr(CPU_OR1K_SPR_SR, sr | CPU_OR1K_SPR_SR_DCE);
0310 
0311   _ISR_Local_enable(level);
0312 }
0313 
0314 static void _CPU_cache_disable_data(void)
0315 {
0316   uint32_t sr;
0317   ISR_Level level;
0318 
0319   _ISR_Local_disable (level);
0320 
0321   sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
0322   _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_DCE));
0323 
0324   _ISR_Local_enable(level);
0325 }
0326 
0327 static void _CPU_cache_enable_instruction(void)
0328 {
0329   uint32_t sr;
0330   ISR_Level level;
0331 
0332   _ISR_Local_disable (level);
0333 
0334   sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
0335   _OR1K_mtspr(CPU_OR1K_SPR_SR, sr | CPU_OR1K_SPR_SR_ICE);
0336 
0337   _ISR_Local_enable(level);
0338 }
0339 
0340 static void _CPU_cache_disable_instruction(void)
0341 {
0342   uint32_t sr;
0343   ISR_Level level;
0344 
0345   _ISR_Local_disable (level);
0346 
0347   sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
0348   _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_ICE));
0349 
0350   _ISR_Local_enable(level);
0351 }
0352 
0353 #include "../../../shared/cache/cacheimpl.h"