/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsX8664AMD64
 *
 * @brief Paging initialization
 */

/*
 * This file sets up paging with 1 GiB pages (i.e. huge pages), using only the
 * PML4 and PDPT and skipping the page-directory (PD) and page-table (PT)
 * levels.
 * We set up identity mapping for the 512 GiB addressable through a fully
 * populated PDPT by using static PML4 and PDPT tables.
 *
 * Much of the code in this file follows Section 4.5 "4-Level Paging" of
 * Volume 3 of the Intel Software Developer's Manual.
 */
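
/*
 * As a quick illustration of the scheme above: with 4-level paging and 1 GiB
 * pages, a 48-bit canonical virtual address is decomposed as
 *   bits 47:39 - PML4 index (selects one of 512 PML4 entries)
 *   bits 38:30 - PDPT index (selects one of 512 one-GiB pages)
 *   bits 29:0  - offset within the 1 GiB page
 * For example, the address 0x140200000 has PML4 index 0, PDPT index 5, and
 * offset 0x200000, i.e. it falls in the sixth GiB, which the tables below
 * identity-map to physical address 0x140200000.
 */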

/*
 * Copyright (c) 2018 Amaan Cheval <amaan.cheval@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <stdio.h>
#include <assert.h>
#include <bsp.h>
#include <rtems.h>
#include <libcpu/page.h>
#include <rtems/score/cpu.h>

uint64_t amd64_pml4[NUM_PAGE_TABLE_ENTRIES] RTEMS_ALIGNED(4096);
uint64_t amd64_pdpt[NUM_PAGE_TABLE_ENTRIES] RTEMS_ALIGNED(4096);

bool paging_1gib_pages_supported(void)
{
  /*
   * If CPUID.80000001H:EDX.Page1GB [bit 26] = 1, 1-GByte pages are supported
   * with 4-level paging.
   */
  uint32_t a, b, c, d;
  cpuid(0x80000001, &a, &b, &c, &d);
  return (d >> 26) & 1;
}

uint8_t get_maxphysaddr(void)
{
  /*
   * CPUID.80000008H:EAX[7:0] reports the physical-address width supported by
   * the processor (EAX[15:8] reports the linear-address width, which is
   * generally 48 if CPUID.80000001H:EDX.LM [bit 29] = 1 and 32 otherwise).
   */
  uint32_t a, b, c, d;
  cpuid(0x80000008, &a, &b, &c, &d);

  uint8_t maxphysaddr = a & 0xff;
  /* This width is referred to as MAXPHYADDR. MAXPHYADDR is at most 52. */
  assert(maxphysaddr <= 52);

  return maxphysaddr;
}

uint64_t get_mask_for_bits(uint8_t start, uint8_t end)
{
  /*
   * Create a mask that selects bits start:end when bitwise ANDed with a
   * value. For example:
   *   get_mask_for_bits(48, 64) = 0xffff000000000000
   */
  uint64_t mask = (((uint64_t) 1 << (end - start)) - 1) << start;
  return mask;
}
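
/*
 * In this file the mask is used to select the physical-address portion of a
 * table or page address; for instance, on a hypothetical CPU with
 * MAXPHYADDR = 40:
 *   get_mask_for_bits(12, 40) = 0x000000fffffff000
 *   get_mask_for_bits(30, 40) = 0x000000ffc0000000
 */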

static inline void assert_0s_from_bit(uint64_t entry, uint8_t bit_pos)
{
  /* Confirm that bits bit_pos:64 are all 0s */
  assert((entry & get_mask_for_bits(bit_pos, 64)) == 0);
}

uint64_t create_cr3_entry(
  uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags
)
{
  /* Confirm that the PML4 address is aligned on a 4KiB boundary */
  assert((phys_addr & 0xfff) == 0);
  uint64_t entry = (phys_addr & get_mask_for_bits(12, maxphysaddr)) | flags;

  /* Confirm that bits maxphysaddr:64 are 0s */
  assert_0s_from_bit(entry, maxphysaddr);
  return entry;
}
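
/*
 * For reference, with CR4.PCIDE = 0 the CR3 fields used by 4-level paging
 * (see Section 4.5 of the SDM volume cited above) are:
 *   bit 3              - PWT (page-level write-through)
 *   bit 4              - PCD (page-level cache disable)
 *   bits 12:MAXPHYADDR - physical address of the 4 KiB aligned PML4 table
 * Bits at and above MAXPHYADDR are reserved and must be 0, which is why the
 * entry is checked with assert_0s_from_bit() above.
 */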

uint64_t create_pml4_entry(
  uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags
)
{
  /* Confirm that the address we're writing is aligned on a 4KiB boundary */
  assert((phys_addr & 0xfff) == 0);
  uint64_t entry = (phys_addr & get_mask_for_bits(12, maxphysaddr)) | flags;

  /*
   * Confirm that bits maxphysaddr:64 are 0s; there are other usable bits there
   * such as PAGE_FLAGS_NO_EXECUTE, but we're asserting that those aren't set
   * either.
   */
  assert_0s_from_bit(entry, maxphysaddr);
  return entry;
}

uint64_t create_pdpt_entry(
  uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags
)
{
  /* Confirm that the physical address is aligned on a 1 GiB page boundary */
  assert((phys_addr & 0x3fffffff) == 0);
  uint64_t entry = (phys_addr & get_mask_for_bits(30, maxphysaddr)) | flags;

  /*
   * Confirm that bits maxphysaddr:64 are 0s; there are other usable bits there
   * such as the protection key and PAGE_FLAGS_NO_EXECUTE, but we're asserting
   * that those aren't set either.
   */
  assert_0s_from_bit(entry, maxphysaddr);
  return entry;
}
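
/*
 * As a rough sketch of what this produces for the identity map set up in
 * paging_init() below: for the i-th GiB, the entry's address bits
 * (30:MAXPHYADDR) hold i * 1 GiB, so the address portion is 0x40000000 for
 * i = 1, 0x80000000 for i = 2, and so on, with the page flags in the low
 * bits.
 */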

void paging_init(void)
{
  if ( !paging_1gib_pages_supported() ) {
    printf("warning: 1 GiB pages aren't supported - trying anyway.\n");
  }
  const uint8_t maxphysaddr = get_maxphysaddr();
  DBG_PRINTF("maxphysaddr = %d\n", maxphysaddr);

  const uint64_t gib = (1 << 30);

  for (uint32_t i = 0; i < NUM_PAGE_TABLE_ENTRIES; i++) {
    amd64_pdpt[i] = create_pdpt_entry(
      /* This is the i-th GiB to identity-map */
      (uint64_t) i * gib,
      maxphysaddr,
      /* Setting the huge-page bit in the PDPTE gives us 1 GiB pages */
      PAGE_FLAGS_DEFAULTS | PAGE_FLAGS_HUGE_PAGE
    );

    amd64_pml4[i] = create_pml4_entry(
      (uint64_t) amd64_pdpt,
      maxphysaddr,
      PAGE_FLAGS_DEFAULTS
    );
  }

  amd64_set_cr3(
    create_cr3_entry(
      (uint64_t) &amd64_pml4,
      maxphysaddr,
      PAGE_FLAGS_WRITE_THROUGH
    )
  );
}
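
/*
 * A minimal usage sketch, assuming a hypothetical caller in the BSP's early
 * startup code (the function name below is illustrative and not part of this
 * file):
 *
 *   void early_memory_setup(void)
 *   {
 *     paging_init();
 *   }
 *
 * After the call, CR3 points at amd64_pml4, the first 512 GiB of physical
 * memory are identity-mapped with 1 GiB pages, and the CR3 load has flushed
 * any stale non-global TLB entries.
 */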