Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:23:58

0001 /*
0002  *  mmuAsm.S
0003  *
0004  *  Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
0005  *
0006  *  This file contains the low-level support for various MMU
0007  *  features.
0008  *
0009  *  The license and distribution terms for this file may be
0010  *  found in the file LICENSE in this distribution or at
0011  *  http://www.rtems.org/license/LICENSE.
0012  *
0013  *  T. Straumann - 11/2001: added support for 7400 (no AltiVec yet)
0014  *  S.K. Feng    - 10/2003: added support for 7455 (no AltiVec yet)
0015  *
0016  */
0017 
0018 #include <rtems/asm.h>
0019 #include <rtems/score/cpu.h>
0020 #include <libcpu/io.h>
0021 #include <libcpu/bat.h>
0022 
0023 /* Unfortunately, the CPU types defined in cpu.h are
0024  * an 'enum' type and hence not available :-(
0025  * The values below are the PVR processor-version field (PVR >> 16),
0026  * which is how the code in this file compares them.
0027  */
0026 #define PPC_601   0x1
0027 #define PPC_603   0x3
0028 #define PPC_604   0x4
0029 #define PPC_603e  0x6
0030 #define PPC_603ev 0x7
0031 #define PPC_750   0x8
0032 #define PPC_604e  0x9
0033 #define PPC_604r  0xA
0034 #define PPC_7400  0xC
0035 #define PPC_7455  0x8001
0036 #define PPC_7457  0x8002
0037 #define PPC_620   0x16
0038 #define PPC_860   0x50
0039 #define PPC_821   PPC_860
0040 #define PPC_8260  0x81
0041 #define PPC_8240  PPC_8260
0042 
0043 /* ALTIVEC instructions (not recognized by off-the shelf gcc yet) */
0044 #define DSSALL  .long   0x7e00066c      /* DSSALL altivec instruction opcode */
0045 
0046 /* A couple of defines to make the code more readable */
0047 #define CACHE_LINE_SIZE 32              /* line size assumed by the flush loops below */
0048 
0049 #ifndef MSSCR0
0050 #define MSSCR0   1014                   /* SPR number of the memory subsystem control register */
0051 #endif
0052 
0053 #define DL1HWF  (1<<(31-8))             /* MSSCR0: L1 data cache hardware flush */
0054 #define L2HWF   (1<<(31-20))            /* L2CR:   L2 hardware flush */
0055 
0056 /* FIXME: Should really move this to C code */
0057 
/*
 * void L1_caches_enables(void)
 *
 * Enable the L1 instruction/data caches and, depending on the CPU
 * detected via the PVR, extra speedup features:
 *   - 604/604e/604r/750/7400: superscalar exec / branch-history bits
 *   - 7400/7455/7457:         branch target instruction cache (BTIC)
 *   - 7455/7457:              LR stack, branch folding, TBEN
 * Clobbers: r0, r8, r9, r11 and cr0-cr3.
 */
0058     .globl L1_caches_enables
0059     .type  L1_caches_enables, @function
0060 
0061 L1_caches_enables:
0062     /*
0063      * Enable caches and 604-specific features if necessary.
0064      */
0065     mfspr   r9,PPC_PVR
0066     rlwinm  r9,r9,16,16,31      /* r9 = processor-version field (PVR >> 16) */
0067     cmpi    0,r9,PPC_601
0068     beq 4f          /* not needed for 601 */
0069     mfspr   r11,HID0
0070     andi.   r0,r11,HID0_DCE     /* remember whether the D-cache was already enabled */
0071     ori r11,r11,HID0_ICE|HID0_DCE
0072     ori r8,r11,HID0_ICFI        /* r8 = r11 plus I-cache flash invalidate */
0073     bne 3f          /* don't invalidate the D-cache */
0074     ori r8,r8,HID0_DCI      /* unless it wasn't enabled */
0075 3:
0076     sync
0077     mtspr   HID0,r8         /* enable and invalidate caches */
0078     sync
0079     mtspr   HID0,r11        /* enable caches */
0080     sync
0081     isync
0082     cmpi    1,r9,PPC_604    /* check for 604 */
0083     cmpi    2,r9,PPC_604e   /* or 604e */
0084     cmpi    3,r9,PPC_604r   /* or mach5 */
0085     cror    6,6,10          /* cr1.eq |= cr2.eq (604 | 604e) */
0086     cror    6,6,14          /* cr1.eq |= cr3.eq (... | 604r) */
0087     cmpi    2,r9,PPC_750    /* or 750 */
0088     cror    6,6,10          /* cr1.eq |= cr2.eq (... | 750) */
0089     cmpi    2,r9,PPC_7400   /* or 7400 */
0090     cror    6,6,10          /* cr1.eq |= cr2.eq (... | 7400); cr2 keeps the 7400 result */
0091     cmpli   0,r9,PPC_7455   /* or 7455 */
0092     beq     1f
0093     cmpli   0,r9,PPC_7457   /* or 7457 */
0094     bne     2f
0095 1:
0096     /* 7455: link register stack, branch folding &
0097      * TBEN : enable the time base and decrementer.
0098      * EMCP bit is defined in HID1. However, it's not used
0099      * on the mvme5500 board because of the GT64260 (e.g. it's connected
0100      * to a pull-up).
0101      */
0102     oris    r11,r11,(HID0_LRSTK|HID0_FOLD|HID0_TBEN)@h
0103     ori     r11,r11,(HID0_LRSTK|HID0_FOLD|HID0_TBEN)@l
0104 2:  cror    2,2,10          /* cr0.eq = (7455|7457) | cr2.eq (7400) */
0105     bne 3f          /* not 7400/7455/7457: skip BTIC */
0106     ori r11,r11,HID0_BTIC   /* enable branch tgt cache on 7400 , 7455 , 7457 */
0107 3:  cror    2,2,6           /* cr0.eq |= cr1.eq (604 family | 750 | 7400) */
0108     bne 4f          /* none of the CPUs above: HID0 is already final */
0109     /* on 7400 SIED is actually SGE (store gathering enable) */
0110     ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
0111     bne 2,5f        /* BTCD only on the 7400 (cr2 still holds that compare) */
0112     ori r11,r11,HID0_BTCD
0113 5:  mtspr   HID0,r11        /* superscalar exec & br history tbl */
0114     sync             /* for SGE bit */
0115     isync                /* P2-17 to 2-22 in MPC7450UM */
0116 4:
0117     blr
0118 
/*
 * unsigned get_L1CR(void)
 *
 * Return the current HID0 contents in r3.  There is no dedicated
 * "L1CR" SPR on these CPUs: HID0 holds the L1 cache control bits,
 * hence the function name.  Clobbers nothing else.
 */
0119     .globl get_L1CR
0120     .type  get_L1CR, @function
0121 get_L1CR:
0122     mfspr   r3,HID0
0123     blr
0124 
/*
 * unsigned get_L2CR(void)
 *
 * Return the L2CR register contents in r3, or -1 if the CPU is not
 * one that has an L2CR (only 750/7400/7455/7457 are recognized here).
 */
0125     .globl get_L2CR
0126     .type  get_L2CR, @function
0127 get_L2CR:
0128     /* Make sure this is a > 750 chip */
0129     mfspr   r3,PPC_PVR
0130     rlwinm  r3,r3,16,16,31      /* r3 = processor-version field (PVR >> 16) */
0131     cmplwi  r3,PPC_750  /* it's a 750 */
0132     beq 1f
0133     cmplwi  r3,PPC_7400 /* it's a 7400 */
0134     beq 1f
0135     cmplwi  r3,PPC_7455 /* it's a 7455 */
0136     beq 1f
0137     cmplwi  r3,PPC_7457 /* it's a 7457 */
0138     beq 1f
0139     li  r3,-1           /* unsupported CPU: return -1 */
0140     blr
0141 
0142 1:
0143     /* Return the L2CR contents */
0144     mfspr   r3,L2CR
0145     blr
0146 
/*
 * unsigned set_L2CR(unsigned newL2CR)    -- argument/return in r3
 *
 * Apply a new L2CR value. Returns -1 on CPUs without an L2CR.
 * If the new value disables the cache, the L2 is flushed first
 * (hardware flush on 7400/745x, 2MB load+flush loop otherwise);
 * if L2I is set in the argument a global invalidate is performed;
 * if L2E is set the cache is finally (re-)enabled.
 * MSR[EE] is masked around the flush/invalidate critical sections
 * and the caller's MSR is restored before returning.
 * Clobbers: r0, r4-r7, ctr, cr0.
 */
0147     .globl set_L2CR
0148     .type  set_L2CR, @function
0149 set_L2CR:
0150     /* Usage:
0151      * When setting the L2CR register, you must do a few special things.
0152      * If you are enabling the cache, you must perform a global invalidate.
0153      * If you are disabling the cache, you must flush the cache contents first.
0154      * This routine takes care of doing these things.  When first
0155      * enabling the cache, make sure you pass in the L2CR you want, as well as
0156      * passing in the global invalidate bit set.  A global invalidate will
0157      * only be performed if the L2I bit is set in applyThis.  When enabling
0158      * the cache, you should also set the L2E bit in applyThis.  If you
0159      * want to modify the L2CR contents after the cache has been enabled,
0160      * the recommended procedure is to first call set_L2CR(0) to disable
0161      * the cache and then call it again with the new values for L2CR.  Examples:
0162      *
0163      *  set_L2CR(0)     -   disables the cache
0164      *  set_L2CR(0xb9A14000)    -   enables my G3 MCP750 card:
0165      *              -   L2E set to turn on the cache
0166      *              -   L2SIZ set to 1MB
0167      *              -   L2CLK set to %2
0168      *              -   L2RAM set to pipelined synchronous late-write
0169      *              -   L2I set to perform a global invalidation
0170      *              -   L2OH set to 1 nS
0171      *
0172      * A similar call should work for your card.  You need to know the correct
0173      * setting for your card and then place them in the fields I have outlined
0174      * above.  Other fields support optional features, such as L2DO which caches
0175      * only data, or L2TS which causes cache pushes from the L1 cache to go to
0176      * the L2 cache instead of to main memory.
0177      */
0178 
0179     /* Make sure this is a > 750 chip */
0180     mfspr   r0,PPC_PVR
0181     rlwinm  r0,r0,16,16,31      /* r0 = processor-version field (PVR >> 16) */
0182     cmplwi  r0,PPC_750
0183     beq thisIs750
0184     cmplwi  r0,PPC_7400
0185     beq thisIs750
0186     cmplwi  r0,PPC_7455
0187     beq thisIs750
0188     cmplwi  r0,PPC_7457
0189     beq thisIs750
0190     li  r3,-1           /* unsupported CPU: return -1 */
0191     blr
0192 
0193 thisIs750:
0194     /* Get the current enable bit of the L2CR into r4 */
0195     mfspr   r4,L2CR
0196     rlwinm  r4,r4,0,0,0         /* keep only bit 0 (L2E) */
0197 
0198     /* See if we want to perform a global inval this time. */
0199     rlwinm  r6,r3,0,10,10       /* r6 contains the new invalidate bit */
0200     rlwinm. r5,r3,0,0,0     /* r5 contains the new enable bit */
0201     rlwinm  r3,r3,0,11,9        /* Turn off the invalidate bit */
0202     rlwinm  r3,r3,0,1,31        /* Turn off the enable bit */
0203     or  r3,r3,r4        /* Keep the enable bit the same as it was for now. */
0204     mfmsr   r7          /* shut off interrupts around critical flush/invalidate sections */
0205     rlwinm  r4,r7,0,17,15       /* Turn off EE bit - an external exception while we are flushing
0206                                    the cache is fatal (comment this line and see!) */
0207     mtmsr   r4
0208     bne dontDisableCache    /* Only disable the cache if L2CRApply has the enable bit off */
0209 
0210     cmplwi  r0,PPC_7400     /* 7400 ? */
0211     bne disableCache        /* use traditional method */
0212 
0213     /* On the 7400, they recommend using the hardware flush feature */
0214     DSSALL                      /* stop all data streams */
0215     sync
0216     /* we wouldn't have to flush L1, but for sake of consistency with the other code we do it anyway */
0217     mfspr   r4, MSSCR0
0218     oris    r4, r4, DL1HWF@h    /* trigger the L1 data-cache hardware flush */
0219     mtspr   MSSCR0, r4
0220     sync
0221     /* L1 flushed */
0222     mfspr   r4, L2CR
0223     ori r4, r4, L2HWF           /* trigger the L2 hardware flush */
0224     mtspr   L2CR, r4
0225     sync
0226     /* L2 flushed */
0227     b   flushDone
0228 
0229 disableCache:
0230     /* Disable the cache.  First, we turn off data relocation. */
0231     rlwinm  r4,r4,0,28,26       /* Turn off DR bit (r4 still holds MSR with EE masked) */
0232     cmplwi  r0,PPC_7455             /* 7455 ? */
0233     beq     1f
0234     cmplwi  r0,PPC_7457             /* 7457 ? */
0235     bne not745x
0236 1:
0237     /* 745x:L1 Load/Flush, L2, L3 :  hardware flush */
0238     DSSALL
0239     mtmsr   r4
0240     sync
0241     isync
0242     mfspr   r4, MSSCR0
0243     rlwinm  r4,r4,0,29,0        /* Turn off the L2PFE bits */
0244     mtspr   MSSCR0, r4
0245     sync
0246     /* flush L1 first */
0247     lis r4,0x0001               /* ctr = 0x10000 lines x 32 bytes = 2MB touched */
0248     mtctr   r4
0249     li  r4,0
0250     li      r0,0
0251 loadFlush:
0252     lwzx    r0,r0,r4            /* load line at EA = r4 (rA=0 reads as literal zero) */
0253     dcbf    r0,r4               /* then flush it back out */
0254     addi    r4,r4,CACHE_LINE_SIZE   /* Go to start of next cache line */
0255     bdnz    loadFlush
0256     sync
0257     /* Set the L2CR[L2IO & L2DO] bits to completely lock the L2 cache */
0258     mfspr   r0, L2CR
0259     lis     r4,L2CR_LOCK_745x@h
0260     ori     r4,r4,L2CR_LOCK_745x@l
0261     or      r4,r0,r4
0262     rlwinm  r4,r4,0,11,9           /* make sure the invalidate bit off */
0263     mtspr   L2CR, r4
0264     sync
0265     ori r4, r4, L2HWF           /* now hardware-flush the locked L2 */
0266     mtspr   L2CR, r4
0267     sync
0268     /* L2 flushed; L2IO & L2DO get cleared by the mtspr in dontDisableCache: */
0269     b   reenableDR
0270 
0271 not745x:
0272     sync
0273     mtmsr   r4                  /* DR now off: loads below bypass translation */
0274     isync
0275     /*
0276         Now, read the first 2MB of memory to put new data in the cache.
0277         (Actually we only need the size of the L2 cache plus
0278         the size of the L1 cache, but 2MB will cover everything just to be safe).
0279     */
0280     lis r4,0x0001               /* ctr = 0x10000 lines x 32 bytes = 2MB */
0281     mtctr   r4
0282     li  r4,0
0283 loadLoop:
0284     lwzx    r0,r0,r4            /* touch each line to displace dirty cache data */
0285     addi    r4,r4,CACHE_LINE_SIZE   /* Go to start of next cache line */
0286     bdnz    loadLoop
0287 
0288     /* Now, flush the first 2MB of memory */
0289     lis     r4,0x0001
0290     mtctr   r4
0291     li      r4,0
0292     sync
0293 flushLoop:
0294     dcbf    r0,r4               /* flush line at EA = r4 (rA=0 reads as literal zero) */
0295     addi    r4,r4,CACHE_LINE_SIZE   /* Go to start of next cache line */
0296     bdnz    flushLoop
0297 reenableDR:
0298     rlwinm  r4,r7,0,17,15       /* still mask EE but reenable data relocation */
0299     sync
0300     mtmsr   r4
0301     isync
0302 
0303 flushDone:
0304 
0305     /* Turn off the L2CR enable bit. */
0306     rlwinm  r3,r3,0,1,31
0307 
0308 dontDisableCache:
0309     /* Set up the L2CR configuration bits */
0310     sync
0311     mtspr   L2CR,r3
0312     sync
0313     cmplwi  r6,0                /* global invalidate requested? */
0314     beq noInval
0315 
0316     /* Perform a global invalidation */
0317     oris    r3,r3,0x0020        /* set L2I */
0318     sync
0319     mtspr   L2CR,r3
0320     sync
0321 invalCompleteLoop:              /* Wait for the invalidation to complete */
0322     mfspr   r3,L2CR
0323     rlwinm. r4,r3,0,31,31       /* low bit set while invalidate is in progress */
0324     bne invalCompleteLoop
0325 
0326     rlwinm  r3,r3,0,11,9;       /* Turn off the L2I bit */
0327     sync
0328     mtspr   L2CR,r3
0329 
0330 noInval:
0331     sync
0332     /* re-enable interrupts, i.e. restore original MSR */
0333     mtmsr   r7                  /* (no sync needed) */
0334     /* See if we need to enable the cache */
0335     cmplwi  r5,0
0336     beqlr
0337 
0338 enableCache:
0339     /* Enable the cache */
0340     oris    r3,r3,0x8000        /* set L2E */
0341     mtspr   L2CR,r3
0342     sync
0343     blr
0344 
0345 
/*
 * unsigned get_L3CR(void)
 *
 * Return the L3CR register contents in r3, or -1 if the CPU has no
 * L3CR (only 7455/7457 are recognized here).
 */
0346     .globl get_L3CR
0347     .type  get_L3CR, @function
0348 get_L3CR:
0349     /* Make sure this is a 7455 chip */
0350     mfspr   r3,PPC_PVR
0351     rlwinm  r3,r3,16,16,31      /* r3 = processor-version field (PVR >> 16) */
0352     cmplwi  r3,PPC_7455 /* it's a 7455 */
0353     beq 1f
0354     cmplwi  r3,PPC_7457 /* it's a 7457 */
0355     beq 1f
0356     li  r3,-1           /* unsupported CPU: return -1 */
0357     blr
0358 
0359 1:
0360     /* Return the L3CR contents */
0361     mfspr   r3,L3CR
0362     blr
0363 
/*
 * unsigned set_L3CR(unsigned newL3CR)    -- argument/return in r3
 *
 * Apply a new L3CR value on 7455/7457; returns -1 on other CPUs.
 * When disabling, the L3 is locked (L3IO|L3DO) and hardware-flushed
 * first; if L3I is set in the argument a global invalidate is done;
 * if L3E is set the cache is finally (re-)enabled.
 * MSR[EE] is masked around the critical sections and the caller's
 * MSR is restored before returning.
 * Clobbers: r0, r4-r7, cr0.
 */
0364     .globl set_L3CR
0365     .type  set_L3CR, @function
0366 set_L3CR:
0367     /* Usage:
0368      * When setting the L3CR register, you must do a few special things.
0369      * If you are enabling the cache, you must perform a global invalidate.
0370      * Then call cpu_enable_l3cr(l3cr).
0371      * If you are disabling the cache, you must flush the cache contents first.
0372      * This routine takes care of doing these things.  If you
0373      * want to modify the L3CR contents after the cache has been enabled,
0374      * the recommended procedure is to first call set_L3CR(0) to disable
0375      * the cache and then call cpu_enable_l3cr with the new values for
0376      * L3CR.
0377      */
0378 
0379     /* Make sure this is a 7455 chip */
0380     mfspr   r0,PPC_PVR
0381     rlwinm  r0,r0,16,16,31      /* r0 = processor-version field (PVR >> 16) */
0382     cmplwi  r0,PPC_7455
0383     beq thisIs7455
0384     cmplwi  r0,PPC_7457
0385     beq thisIs7455
0386     li  r3,-1           /* unsupported CPU: return -1 */
0387     blr
0388 
0389 thisIs7455:
0390     /* Get the current enable bit of the L3CR into r4 */
0391     mfspr   r4,L3CR
0392     rlwinm  r4,r4,0,0,0         /* keep only bit 0 (L3E) */
0393 
0394     /* See if we want to perform a global inval this time. */
0395     rlwinm  r6,r3,0,10,10       /* r6 contains the new invalidate bit */
0396     rlwinm. r5,r3,0,0,0     /* r5 contains the new enable bit */
0397     rlwinm  r3,r3,0,11,9        /* Turn off the invalidate bit */
0398     rlwinm  r3,r3,0,1,31        /* Turn off the enable bit */
0399     or  r3,r3,r4        /* Keep the enable bit the same as it was for now. */
0400     mfmsr   r7          /* shut off interrupts around critical flush/invalidate sections */
0401     rlwinm  r4,r7,0,17,15       /* Turn off EE bit - an external exception while we are flushing
0402                                    the cache is fatal (comment this line and see!) */
0403     mtmsr   r4
0404     bne dontDisableL3Cache  /* Only disable the cache if L3CRApply has the enable bit off */
0405     /* Before the L3 is disabled, it must be flushed to prevent coherency problems */
0406     /* First, we turn off data relocation. */
0407     rlwinm  r4,r4,0,28,26       /* Turn off DR bit */
0408     DSSALL
0409     sync
0410     mtmsr   r4
0411     isync               /* make sure memory accesses have completed */
0412     /* 7455: L3 :    hardware flush
0413      * Set the L3CR[L3IO & L3DO] bits to completely lock the L3 cache */
0414     mfspr   r0, L3CR
0415     lis     r4, L3CR_LOCK_745x@h
0416     ori     r4,r4, L3CR_LOCK_745x@l
0417     or      r4,r0,r4
0418     rlwinm  r4,r4,0,11,9           /* make sure the invalidate bit off */
0419     mtspr   L3CR, r4
0420     sync
0421     ori r4, r4, L3CR_L3HWF      /* now hardware-flush the locked L3 */
0422     mtspr   L3CR, r4
0423     sync
0424     /* L3 flushed; L3IO & L3DO get cleared by the mtspr in dontDisableL3Cache: */
0425     rlwinm  r4,r7,0,17,15       /* still mask EE but reenable data relocation */
0426     sync
0427     mtmsr   r4
0428     isync
0429 
0430     /* Turn off the L3CR enable bit. */
0431     rlwinm  r3,r3,0,1,31
0432 
0433 dontDisableL3Cache:
0434     /* Set up the L3CR configuration bits */
0435     sync
0436     mtspr   L3CR,r3
0437     sync
0438 ifL3Inval:
0439     cmplwi  r6,0                /* global invalidate requested? */
0440     beq noL3Inval
0441 
0442     /* Perform a global invalidation */
0443     oris    r3,r3,0x0020        /* set L3I */
0444     sync
0445     mtspr   L3CR,r3
0446     sync
0447 invalCompleteL3:                /* Wait for the invalidation to complete */
0448     mfspr   r3,L3CR
0449     rlwinm. r4,r3,0,31,31       /* low bit set while invalidate is in progress */
0450     bne invalCompleteL3
0451 
0452     rlwinm  r3,r3,0,11,9;       /* Turn off the L3I bit */
0453     sync
0454     mtspr   L3CR,r3
0455     sync
0456 
0457 noL3Inval:
0458     /* re-enable interrupts, i.e. restore original MSR */
0459     mtmsr   r7                  /* (no sync needed) */
0460     /* See if we need to enable the cache */
0461     cmplwi  r5,0
0462     beqlr
0463 
0464 enableL3Cache:
0465     /* Enable the cache */
0466     oris    r3,r3,0x8000        /* set L3E */
0467     mtspr   L3CR,r3
0468     sync
0469     blr
0470 
0471 /*
0472  * An undocumented "feature" of 604e requires that the v bit
0473  * be cleared before changing BAT values.
0474  *
0475  * Also, newer IBM firmware does not clear bat3 and 4 so
0476  * this makes sure it's done.
0477  *  -- Cort
0478  */
/*
 * void CPU_clear_bats_early(void)
 *
 * Zero every BAT register the detected CPU supports:
 *   PVR 1 (601)             : IBAT0-3 only (the DBAT writes are skipped)
 *   PVR 0x8001-0x8004 (745x): I/DBAT pairs 0-7
 *   everything else         : I/DBAT pairs 0-3
 * Clobbers: r3, r4, cr0.
 */
0479     .globl  CPU_clear_bats_early
0480     .type   CPU_clear_bats_early,@function
0481 CPU_clear_bats_early:
0482     li      r3,0                /* value written to every BAT register */
0483     mfspr   r4,PPC_PVR
0484     rlwinm  r4,r4,16,16,31      /* r4 = 1 for 601, 4 for 604 */
0485     cmpwi   r4, 1               /* 601? then only the IBATs are written below */
0486     sync
0487     isync
0488     beq 1f
0489     cmplwi  r4,0x8001           /* 7445, 7455 (0x8001), 7447, 7457 (0x8002)      */
0490     blt 2f                      /* 7447a (0x8003) and 7448 (0x8004) have 16 bats */
0491     cmplwi  r4,0x8004
0492     bgt 2f
     /* PVR in 0x8001..0x8004: clear the four extra BAT pairs first */
0493     mtspr   DBAT4U,r3
0494     mtspr   DBAT4L,r3
0495     mtspr   DBAT5U,r3
0496     mtspr   DBAT5L,r3
0497     mtspr   DBAT6U,r3
0498     mtspr   DBAT6L,r3
0499     mtspr   DBAT7U,r3
0500     mtspr   DBAT7L,r3
0501     mtspr   IBAT4U,r3
0502     mtspr   IBAT4L,r3
0503     mtspr   IBAT5U,r3
0504     mtspr   IBAT5L,r3
0505     mtspr   IBAT6U,r3
0506     mtspr   IBAT6L,r3
0507     mtspr   IBAT7U,r3
0508     mtspr   IBAT7L,r3
0509 2:
     /* all non-601 CPUs: clear the four standard DBAT pairs */
0510     mtspr   DBAT0U,r3
0511     mtspr   DBAT0L,r3
0512     mtspr   DBAT1U,r3
0513     mtspr   DBAT1L,r3
0514     mtspr   DBAT2U,r3
0515     mtspr   DBAT2L,r3
0516     mtspr   DBAT3U,r3
0517     mtspr   DBAT3L,r3
0518 1:
     /* every CPU (including 601): clear the four standard IBAT pairs */
0519     mtspr   IBAT0U,r3
0520     mtspr   IBAT0L,r3
0521     mtspr   IBAT1U,r3
0522     mtspr   IBAT1L,r3
0523     mtspr   IBAT2U,r3
0524     mtspr   IBAT2L,r3
0525     mtspr   IBAT3U,r3
0526     mtspr   IBAT3L,r3
0527     sync
0528     isync
0529     blr
0530