/*  Blackfin Cache Support
 *
 *  Copyright (c) 2008 Kallisti Labs, Los Gatos, CA, USA
 *             written by Allan Hessenflow <allanh@kallisti.com>
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */


#include <rtems.h>
#include <bsp.h>
#include <libcpu/memoryRegs.h>

#define CPU_DATA_CACHE_ALIGNMENT          32
#define CPU_INSTRUCTION_CACHE_ALIGNMENT   32

#ifdef BSP_DATA_CACHE_CONFIG
#define LIBCPU_DATA_CACHE_CONFIG BSP_DATA_CACHE_CONFIG
#else
/* default: use 16K of each data SRAM bank as cache */
#define LIBCPU_DATA_CACHE_CONFIG (3 << DMEM_CONTROL_DMC_SHIFT)
#endif
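
/* A BSP may override the default above by defining BSP_DATA_CACHE_CONFIG
   with the desired DMEM_CONTROL_DMC bits (see libcpu/memoryRegs.h).  A
   hypothetical example, for a BSP that wants only one bank configured
   as cache:

       #define BSP_DATA_CACHE_CONFIG (2 << DMEM_CONTROL_DMC_SHIFT)
*/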

/* There are many syncs in the following code because they should be
   harmless except for wasting time, and this is easier than figuring out
   exactly where they're needed to protect from the effects of write
   buffers and queued reads.  Many of them are likely unnecessary. */


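/* Note: the "a" constraint places the address in a P (pointer) register,
   which the Blackfin FLUSH, FLUSHINV and IFLUSH instructions require
   for their operand. */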
static void _CPU_cache_flush_1_data_line(const void *d_addr) {

  __asm__ __volatile__ ("ssync; flush [%0]; ssync" :: "a" (d_addr));
}

/* Blackfins can't just invalidate cache; they can only do flush +
   invalidate.  If the line isn't dirty then this is equivalent to
   just an invalidate.  Even if it is dirty, this should still be
   okay, since with a pure invalidate method the caller would have no
   way to ensure the dirty line hadn't been written out anyway prior
   to the invalidate. */
static void _CPU_cache_invalidate_1_data_line(const void *d_addr) {

  __asm__ __volatile__ ("ssync; flushinv [%0]; ssync" :: "a" (d_addr));
}

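/* Drivers normally use the RTEMS cache manager API (implemented by
   cacheimpl.h, included at the bottom of this file) rather than calling
   these _CPU_cache_* primitives directly.  A rough sketch of DMA buffer
   maintenance using that API, where buf and len are hypothetical:

       #include <rtems/rtems/cache.h>

       // before a device DMAs into buf: discard stale cached copies
       rtems_cache_invalidate_multiple_data_lines(buf, len);

       // before a device DMAs out of buf: push dirty lines to memory
       rtems_cache_flush_multiple_data_lines(buf, len);
*/
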
/* Cache freezing is not supported by this port, so the freeze and
   unfreeze hooks (data and instruction) are intentionally no-ops. */
static void _CPU_cache_freeze_data(void) {
}

static void _CPU_cache_unfreeze_data(void) {
}

static void _CPU_cache_invalidate_1_instruction_line(const void *d_addr) {

  __asm__ __volatile__ ("ssync; iflush [%0]; ssync" :: "a" (d_addr));
}

static void _CPU_cache_freeze_instruction(void) {
}

static void _CPU_cache_unfreeze_instruction(void) {
}

/* incredibly inefficient...  It would be better to make use of the
   DTEST_COMMAND/DTEST_DATAx registers to find the addresses in each
   cache line and flush just those.  However the documentation I've
   seen on those is a bit sketchy, and I sure wouldn't want to get it
   wrong. */
static void _CPU_cache_flush_entire_data(void) {
  uint32_t i;

  /* Walk the entire 4GiB address space in cache-line steps; flushing
     an address that is not cached is harmless.  The loop terminates
     when the 32-bit counter wraps back around to zero. */
  i = 0;
  __asm__ __volatile__ ("ssync");
  do {
      __asm__ __volatile__ ("flush [%0]" :: "a" (i));
      i += CPU_DATA_CACHE_ALIGNMENT;
  } while (i);
  __asm__ __volatile__ ("ssync");
}

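/* Toggling the DMC bits off and back on relies on the hardware
   invalidating all data cache lines when the data banks are
   reconfigured as SRAM. */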
static void _CPU_cache_invalidate_entire_data(void) {
  uint32_t dmemControl;

  __asm__ __volatile__ ("ssync");
  dmemControl = *(uint32_t volatile *) DMEM_CONTROL;
  *(uint32_t volatile *) DMEM_CONTROL = dmemControl & ~DMEM_CONTROL_DMC_MASK;
  *(uint32_t volatile *) DMEM_CONTROL = dmemControl;
  __asm__ __volatile__ ("ssync");
}

/* This does not actually enable the data cache unless the CPLBs
   (Cacheability Protection Lookaside Buffers) are also set up and
   enabled.  LIBCPU_DATA_CACHE_CONFIG contains the DMEM_CONTROL_DMC
   bits to set. */
static void _CPU_cache_enable_data(void) {

  __asm__ __volatile__ ("ssync");
  *(uint32_t volatile *) DMEM_CONTROL |= LIBCPU_DATA_CACHE_CONFIG;
  __asm__ __volatile__ ("ssync");
}

static void _CPU_cache_disable_data(void) {

  __asm__ __volatile__ ("ssync");
  *(uint32_t volatile *) DMEM_CONTROL &= ~DMEM_CONTROL_DMC_MASK;
  __asm__ __volatile__ ("ssync");
}

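/* As with the data cache above: briefly clearing and restoring the IMC
   enable bit invalidates all instruction cache lines. */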
static void _CPU_cache_invalidate_entire_instruction(void) {
  uint32_t imemControl;

  __asm__ __volatile__ ("ssync");
  imemControl = *(uint32_t volatile *) IMEM_CONTROL;
  *(uint32_t volatile *) IMEM_CONTROL = imemControl & ~IMEM_CONTROL_IMC;
  *(uint32_t volatile *) IMEM_CONTROL = imemControl;
  __asm__ __volatile__ ("ssync");
}

/* Likewise, this only actually enables the instruction cache if the
   CPLBs are also set up and enabled. */
static void _CPU_cache_enable_instruction(void) {

  __asm__ __volatile__ ("ssync");
  *(uint32_t volatile *) IMEM_CONTROL |= IMEM_CONTROL_IMC;
  __asm__ __volatile__ ("ssync");
}

static void _CPU_cache_disable_instruction(void) {

  __asm__ __volatile__ ("ssync");
  *(uint32_t volatile *) IMEM_CONTROL &= ~IMEM_CONTROL_IMC;
  __asm__ __volatile__ ("ssync");
}

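/* The shared cache manager implementation builds the public
   rtems_cache_* API on top of the _CPU_cache_* primitives defined
   above. */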
#include "../../../shared/cache/cacheimpl.h"