/*
 * pgtable.h
 *
 *  PowerPC memory management structures
 *
 *  A stripped-down version of the Linux PPC file...
 *
 * Copyright (C) 1999  Eric Valette (valette@crf.canon.fr)
 *                     Canon Centre Recherche France.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _LIBCPU_PGTABLE_H
#define _LIBCPU_PGTABLE_H

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much like
 * that used by the i386, for the sake of the Linux memory management code.
 * Low-level assembler code in head.S (procedure hash_page) is responsible
 * for extracting PTEs from the tree and putting them into the hash table
 * when necessary, and for updating the accessed and modified bits in the
 * page table tree.
 *
 * The PowerPC MPC8xx uses a TLB with a hardware-assisted software
 * tablewalk.  We also use the two-level tables, but we can put the real
 * bits needed for the TLB and tablewalk in them.  These definitions
 * require Mx_CTR.PPM = 0, Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The
 * level 2 descriptor has additional page protection (when Mx_CTR.PPCS = 1)
 * that allows a TLB hit based upon user/supervisor access.  The TLB has
 * neither an accessed bit nor write protection.  We assume that if a TLB
 * entry gets loaded it has been accessed, and we overload the changed bit
 * for write protection.  We use two bits in the software PTE that are
 * supposed to be set to zero in the TLB entry (24 and 25) for these
 * indicators.  Although the level 1 descriptor contains the guarded and
 * writethrough/copyback bits, we can set these at the page level since
 * they get copied from the Mx_TWC register when the TLB entry is loaded.
 * We will use bit 27 for guard, since that is where it exists in the
 * MD_TWC, and bit 26 for writethrough.  These will get masked from the
 * level 2 descriptor at TLB load time and copied to the MD_TWC before it
 * gets loaded.
 */
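
/*
 * Illustration (not part of the original header): PowerPC manuals
 * number bits from the most-significant end, so bit 0 is the MSB and
 * bit n of a 32-bit word has the value 1 << (31 - n).  A hypothetical
 * helper macro makes the MPC8xx bit numbers quoted above concrete:
 *
 *   #define PPC_BIT32(n)  (1UL << (31 - (n)))
 *
 *   PPC_BIT32(24) == 0x80    PPC_BIT32(25) == 0x40
 *   PPC_BIT32(26) == 0x20    PPC_BIT32(27) == 0x10
 */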

/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT   22
#define PMD_SIZE    (1UL << PMD_SHIFT)
#define PMD_MASK    (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    1024
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
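
/*
 * Illustration (not part of the original header): assuming 4 kB pages
 * (PAGE_SHIFT == 12, defined elsewhere), a 32-bit virtual address
 * splits into a 10-bit directory index, a 10-bit table index and a
 * 12-bit page offset, which is exactly what PGDIR_SHIFT == 22 and
 * PTRS_PER_PGD == PTRS_PER_PTE == 1024 describe:
 *
 *   pgd_idx = addr >> PGDIR_SHIFT;                  (bits 31..22)
 *   pte_idx = (addr >> 12) & (PTRS_PER_PTE - 1);    (bits 21..12)
 *   offset  = addr & 0xfff;                         (bits 11..0)
 */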

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 64MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about our early ioremap() mappings, which grow downwards from
 * ioremap_base, clashing with the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot, so we can
 * check when the VM system actually runs into the mappings we set up
 * during early boot.  This really does become a problem for machines
 * with good amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x4000000) /* 64M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END ioremap_bot
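
/*
 * Worked example (illustration only, assuming high_memory -- defined
 * elsewhere -- marks the end of physical RAM at 16 MB): VMALLOC_START =
 * (0x01000000 + 0x04000000) & ~0x03FFFFFF = 0x04000000, i.e. vmalloc
 * space starts at the 64 MB boundary and the unmapped hole above
 * physical RAM is 48 MB in this case; the rounding leaves a hole of up
 * to 64 MB, not always exactly 64 MB.
 */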

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT   0x001   /* software: pte contains a translation */
#define _PAGE_USER  0x002   /* matches one of the PP bits */
#define _PAGE_RW    0x004   /* software: user write access allowed */
#define _PAGE_GUARDED   0x008   /* G: prohibit speculative access */
#define _PAGE_COHERENT  0x010   /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020   /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040   /* W: cache write-through */
#define _PAGE_DIRTY 0x080   /* C: page changed */
#define _PAGE_ACCESSED  0x100   /* R: page referenced */
#define _PAGE_HWWRITE   0x200   /* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED    0       /* software: no-op on this port (always 0) */

#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

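/*
 * Illustration (pte_modify() is not defined in this file; this mirrors
 * the Linux idiom the mask exists for): _PAGE_CHG_MASK keeps the
 * physical page number (PAGE_MASK, defined elsewhere) and the
 * referenced/changed bits when a page's protection is changed:
 *
 *   pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
 */
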
#define _PAGE_BASE  (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define PAGE_NONE   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
                 _PAGE_SHARED)
#define PAGE_COPY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI  __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
                 _PAGE_NO_CACHE)

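/*
 * Note (illustration, not part of the original header): PAGE_KERNEL
 * maps ordinary cacheable kernel memory, while PAGE_KERNEL_CI adds
 * _PAGE_NO_CACHE and is the variant suited to memory-mapped device
 * registers, where reads must reach the device rather than the cache.
 */
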
/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get.
 *
 * In the __P and __S names below, the three digits are conventionally
 * the execute, write and read permission bits of a mapping; __P entries
 * are for private (copy-on-write) mappings and __S entries for shared
 * mappings.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
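
/*
 * Usage sketch (illustration; protection_map is the Linux name and is
 * not defined in this file): the entries above are meant to be gathered
 * into a 16-entry table indexed by the shared/execute/write/read bits
 * of a mapping, private entries first:
 *
 *   pgprot_t protection_map[16] = {
 *       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 *       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 *   };
 */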
#endif /* _LIBCPU_PGTABLE_H */