From 8d4379bfe352a2f44936e053d825e28e572b6614 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 11:21:48 +0200
Subject: [PATCH 1/4] construct memory map in memory mapped by other page
 table

This has the advantage that we don't need to map the memory map into
the bootloader's page tables.

Up until now, we've assumed that UEFI only maps memory into regions
covered by the first PML4 entry, but it turns out that some
implementations map the frame buffer into memory covered by other PML4
entries (this is totally legal; the frame buffer need not be identity
mapped). Previously we removed all but the first PML4 entry so that we
could map our own memory there, but it's now clear that this can lead
to problems.

The solution is to not modify the page tables constructed by UEFI and
to keep all of their PML4 entries. This is problematic, however,
because the code constructing the boot info and memory map assumes that
it can map them at the same addresses used in the kernel's address
space, and UEFI may have already mapped some memory there.

Instead of mapping the boot info and memory map into the bootloader's
address space, we now only map them into the kernel's address space.
When we want to write to them, we first look up the physical addresses
in the kernel's page tables and write through the identity mapping.
RemoteMemoryRegion implements this.

We have some unit tests for constructing the memory map, and we want
those tests to be able to run outside the bootloader, where
RemoteMemoryRegion can't be used. To solve this, we introduce a trait,
MemoryRegionSlice, that only requires implementing a method for writing
a memory region, and implement it for both RemoteMemoryRegion and
[MaybeUninit<MemoryRegion>].
---
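Notes: a minimal, self-contained sketch of the pattern introduced below,
for trying it outside the bootloader. MemoryRegion, Remote, and fill are
simplified stand-ins for bootloader_api::info::MemoryRegion,
RemoteMemoryRegion, and construct_memory_map; the page-table walk is
reduced to a plain index translation so the sketch compiles and runs on
the host. Only the trait shape matches the diff:

    use core::mem::MaybeUninit;

    // Stand-in for `bootloader_api::info::MemoryRegion`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct MemoryRegion {
        start: u64,
        end: u64,
    }

    // The map constructor only needs "write entry `index`"; where that
    // entry actually lands is the implementor's concern.
    trait MemoryRegionSlice {
        fn set(&mut self, index: usize, region: MemoryRegion);
    }

    // Test flavor: entries land in an ordinary in-process slice.
    impl MemoryRegionSlice for [MaybeUninit<MemoryRegion>] {
        fn set(&mut self, index: usize, region: MemoryRegion) {
            self.get_mut(index)
                .expect("cannot add region: no more free entries in memory map")
                .write(region);
        }
    }

    // "Remote" flavor: every write goes through a translation step first,
    // standing in for RemoteMemoryRegion's per-byte page-table walk.
    struct Remote<'a> {
        phys: &'a mut [MaybeUninit<MemoryRegion>], // "identity-mapped physical memory"
        translate: fn(usize) -> usize,             // "virtual -> physical lookup"
    }

    impl MemoryRegionSlice for Remote<'_> {
        fn set(&mut self, index: usize, region: MemoryRegion) {
            let phys_index = (self.translate)(index);
            self.phys[phys_index].write(region);
        }
    }

    // Callers stay generic over the trait, like `construct_memory_map`.
    fn fill(regions: &mut (impl MemoryRegionSlice + ?Sized)) {
        regions.set(0, MemoryRegion { start: 0x0000, end: 0x1000 });
        regions.set(1, MemoryRegion { start: 0x1000, end: 0x5000 });
    }

    fn main() {
        let mut local = [MaybeUninit::uninit(); 2];
        fill(local.as_mut_slice());

        let mut backing = [MaybeUninit::uninit(); 2];
        fill(&mut Remote { phys: &mut backing, translate: |i| i });

        // Both flavors observe the same writes.
        let a = unsafe { local[0].assume_init() };
        let b = unsafe { backing[0].assume_init() };
        assert_eq!(a, b);
    }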
 common/src/legacy_memory_region.rs | 149 +++++++++++++++++++++++------
 common/src/lib.rs                  |  97 ++++++++++---------
 2 files changed, 166 insertions(+), 80 deletions(-)

diff --git a/common/src/legacy_memory_region.rs b/common/src/legacy_memory_region.rs
index baa7d0b9..01fc9b96 100644
--- a/common/src/legacy_memory_region.rs
+++ b/common/src/legacy_memory_region.rs
@@ -1,13 +1,9 @@
 use bootloader_api::info::{MemoryRegion, MemoryRegionKind};
-use core::{
-    cmp,
-    iter::{empty, Empty},
-    mem::MaybeUninit,
-};
+use core::{cmp, mem::MaybeUninit};
 use x86_64::{
     align_down, align_up,
-    structures::paging::{FrameAllocator, PhysFrame, Size4KiB},
-    PhysAddr,
+    structures::paging::{FrameAllocator, OffsetPageTable, PhysFrame, Size4KiB, Translate},
+    PhysAddr, VirtAddr,
 };
 
 /// A slice of memory that is used by the bootloader and needs to be reserved
@@ -159,14 +155,14 @@ where
     /// must be at least the value returned by [`len`] plus 1.
     ///
     /// The return slice is a subslice of `regions`, shortened to the actual number of regions.
-    pub fn construct_memory_map(
+    pub(crate) fn construct_memory_map(
         self,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         kernel_slice_start: PhysAddr,
         kernel_slice_len: u64,
         ramdisk_slice_start: Option<PhysAddr>,
         ramdisk_slice_len: u64,
-    ) -> &mut [MemoryRegion] {
+    ) -> usize {
         let used_slices = [
             UsedMemorySlice {
                 start: self.min_frame.start_address().as_u64(),
@@ -211,17 +207,12 @@
             }
         }
 
-        let initialized = &mut regions[..next_index];
-        unsafe {
-            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
-            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
-            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
-        }
+        next_index
     }
 
     fn split_and_add_region<'a, U>(
         mut region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
         used_slices: U,
     ) where
@@ -279,24 +270,97 @@ where
 
     fn add_region(
         region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
     ) {
         if region.start == region.end {
             // skip zero sized regions
             return;
         }
-        unsafe {
-            regions
-                .get_mut(*next_index)
-                .expect("cannot add region: no more free entries in memory map")
-                .as_mut_ptr()
-                .write(region)
-        };
+        regions.set(*next_index, region);
         *next_index += 1;
     }
 }
+
+/// A trait for slice-like types that allow writing a memory region to a
+/// given index. Usually `RemoteMemoryRegion` is used, but we use
+/// `[MaybeUninit<MemoryRegion>]` in tests.
+pub(crate) trait MemoryRegionSlice {
+    fn set(&mut self, index: usize, region: MemoryRegion);
+}
+
+#[cfg(test)]
+impl MemoryRegionSlice for [MaybeUninit<MemoryRegion>] {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        self.get_mut(index)
+            .expect("cannot add region: no more free entries in memory map")
+            .write(region);
+    }
+}
+
+/// This type makes it possible to write to a slice mapped in a different set
+/// of page tables. For every write access, we look up the physical frame in
+/// the page tables and directly write to the physical memory. That way we
+/// don't need to map the slice into the current page tables.
+pub(crate) struct RemoteMemoryRegion<'a> {
+    page_table: &'a OffsetPageTable<'a>,
+    base: VirtAddr,
+    len: usize,
+}
+
+impl<'a> RemoteMemoryRegion<'a> {
+    /// Construct a new `RemoteMemoryRegion`.
+    ///
+    /// # Safety
+    ///
+    /// The caller has to ensure that the memory starting at `base` isn't
+    /// aliasing memory in the current page tables.
+    pub unsafe fn new(page_table: &'a OffsetPageTable<'a>, base: VirtAddr, len: usize) -> Self {
+        Self {
+            page_table,
+            base,
+            len,
+        }
+    }
+}
+
+impl MemoryRegionSlice for RemoteMemoryRegion<'_> {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        assert!(
+            index < self.len,
+            "cannot add region: no more free entries in memory map"
+        );
+
+        // Cast the memory region into a byte slice. MemoryRegion has some
+        // padding bytes, so we need to use `MaybeUninit<u8>` instead of `u8`.
+        let bytes = unsafe {
+            core::slice::from_raw_parts(
+                &region as *const _ as *const MaybeUninit<u8>,
+                size_of::<MemoryRegion>(),
+            )
+        };
+
+        // An entry may cross a page boundary, so write one byte at a time.
+        let addr = self.base + index * size_of::<MemoryRegion>();
+        for (addr, byte) in (addr..).zip(bytes.iter().copied()) {
+            // Look up the physical address in the remote page table.
+            let phys_addr = self
+                .page_table
+                .translate_addr(addr)
+                .expect("memory is mapped in the page table");
+
+            // Get a pointer to the physical memory -> all physical memory is
+            // identity mapped.
+            let ptr = phys_addr.as_u64() as *mut MaybeUninit<u8>;
+
+            // Write the byte.
+            unsafe {
+                ptr.write(byte);
+            }
+        }
+    }
+}
+
 unsafe impl<I, D> FrameAllocator<Size4KiB> for LegacyFrameAllocator<I, D>
 where
     I: ExactSizeIterator<Item = D> + Clone,
@@ -384,14 +448,21 @@ mod tests {
         let ramdisk_slice_start = None;
         let ramdisk_slice_len = 0;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
 
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         for region in kernel_regions.iter() {
             assert!(region.start % 0x1000 == 0);
             assert!(region.end % 0x1000 == 0);
@@ -411,13 +482,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
 
         // usable memory before the kernel
         assert_eq!(
@@ -514,13 +593,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
 
         // usable memory before the kernel
diff --git a/common/src/lib.rs b/common/src/lib.rs
index 1c8b1efe..0a8fd473 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -10,6 +10,7 @@ use bootloader_api::{
 };
 use bootloader_boot_config::{BootConfig, LevelFilter};
 use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
+use legacy_memory_region::RemoteMemoryRegion;
 use level_4_entries::UsedLevel4Entries;
 use usize_conversions::FromUsize;
 use x86_64::{
@@ -481,69 +482,67 @@ where
     log::info!("Allocate bootinfo");
 
     // allocate and map space for the boot info
-    let (boot_info, memory_regions) = {
-        let boot_info_layout = Layout::new::<BootInfo>();
-        let regions = frame_allocator.memory_map_max_region_count();
-        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
-        let (combined, memory_regions_offset) =
-            boot_info_layout.extend(memory_regions_layout).unwrap();
-
-        let boot_info_addr = mapping_addr(
-            config.mappings.boot_info,
-            u64::from_usize(combined.size()),
-            u64::from_usize(combined.align()),
-            &mut mappings.used_entries,
-        )
-        .expect("boot info addr is not properly aligned");
+    let boot_info_layout = Layout::new::<BootInfo>();
+    let regions = frame_allocator.memory_map_max_region_count();
+    let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+    let (combined, memory_regions_offset) = boot_info_layout.extend(memory_regions_layout).unwrap();
+
+    let boot_info_addr = mapping_addr(
+        config.mappings.boot_info,
+        u64::from_usize(combined.size()),
+        u64::from_usize(combined.align()),
+        &mut mappings.used_entries,
+    )
+    .expect("boot info addr is not properly aligned");
 
-        let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
-        let memory_map_regions_end = boot_info_addr + combined.size();
+    let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
+    let memory_map_regions_end = boot_info_addr + combined.size();
 
-        let start_page = Page::containing_address(boot_info_addr);
-        let end_page = Page::containing_address(memory_map_regions_end - 1u64);
-        for page in Page::range_inclusive(start_page, end_page) {
-            let flags =
-                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
-            let frame = frame_allocator
-                .allocate_frame()
-                .expect("frame allocation for boot info failed");
-            match unsafe {
-                page_tables
-                    .kernel
-                    .map_to(page, frame, flags, &mut frame_allocator)
-            } {
-                Ok(tlb) => tlb.flush(),
-                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
-            }
-            // we need to be able to access it too
-            match unsafe {
-                page_tables
-                    .bootloader
-                    .map_to(page, frame, flags, &mut frame_allocator)
-            } {
-                Ok(tlb) => tlb.flush(),
-                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
-            }
+    let start_page = Page::containing_address(boot_info_addr);
+    let end_page = Page::containing_address(memory_map_regions_end - 1u64);
+    for page in Page::range_inclusive(start_page, end_page) {
+        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
+        let frame = frame_allocator
+            .allocate_frame()
+            .expect("frame allocation for boot info failed");
+        match unsafe {
+            page_tables
+                .kernel
+                .map_to(page, frame, flags, &mut frame_allocator)
+        } {
+            Ok(tlb) => tlb.flush(),
+            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
         }
+        // we need to be able to access it too
+        match unsafe {
+            page_tables
+                .bootloader
+                .map_to(page, frame, flags, &mut frame_allocator)
+        } {
+            Ok(tlb) => tlb.flush(),
+            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
+        }
+    }
 
-        let boot_info: &'static mut MaybeUninit<BootInfo> =
-            unsafe { &mut *boot_info_addr.as_mut_ptr() };
-        let memory_regions: &'static mut [MaybeUninit<MemoryRegion>] =
-            unsafe { slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), regions) };
-        (boot_info, memory_regions)
-    };
+    let boot_info: &'static mut MaybeUninit<BootInfo> =
+        unsafe { &mut *boot_info_addr.as_mut_ptr() };
 
     log::info!("Create Memory Map");
 
     // build memory map
-    let memory_regions = frame_allocator.construct_memory_map(
-        memory_regions,
+    let mut slice =
+        unsafe { RemoteMemoryRegion::new(&page_tables.kernel, memory_map_regions_addr, regions) };
+    let len = frame_allocator.construct_memory_map(
+        &mut slice,
         mappings.kernel_slice_start,
         mappings.kernel_slice_len,
         mappings.ramdisk_slice_phys_start,
         mappings.ramdisk_slice_len,
     );
 
+    let memory_regions =
+        unsafe { core::slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), len) };
+
     log::info!("Create bootinfo");
 
     // create boot info

From 2548c25efd13aec001016eaccf0d66372fc8f553 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 11:47:43 +0200
Subject: [PATCH 2/4] use VirtAddr instead of &'static mut BootInfo

In the future, we'll no longer map the BootInfo into the bootloader's
page tables, so we can't (and shouldn't) create a mutable reference to
it. We don't need a &'static mut anyway; a VirtAddr works just as well.
---
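Notes: for context, the kernel-facing side (a hypothetical consumer, not
part of this series). The System V AMD64 calling convention passes the
first integer argument in rdi, so the raw address that context_switch
places there still arrives in the kernel's entry point as a
&'static mut BootInfo. A sketch of such a kernel using bootloader_api:

    #![no_std]
    #![no_main]

    use bootloader_api::{entry_point, BootInfo};

    // `entry_point!` generates the actual `_start` symbol; the u64 the
    // bootloader leaves in `rdi` is what backs this reference.
    entry_point!(kernel_main);

    fn kernel_main(boot_info: &'static mut BootInfo) -> ! {
        let _region_count = boot_info.memory_regions.len();
        loop {}
    }

    #[panic_handler]
    fn panic(_info: &core::panic::PanicInfo) -> ! {
        loop {}
    }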
 common/src/lib.rs | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/common/src/lib.rs b/common/src/lib.rs
index 0a8fd473..c5f824d4 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -474,7 +474,7 @@ pub fn create_boot_info<I, D>(
     page_tables: &mut PageTables,
     mappings: &mut Mappings,
     system_info: SystemInfo,
-) -> &'static mut BootInfo
+) -> VirtAddr
 where
     I: ExactSizeIterator<Item = D> + Clone,
     D: LegacyMemoryRegion,
@@ -579,15 +579,11 @@ where
         info
     });
 
-    boot_info
+    boot_info_addr
 }
 
 /// Switches to the kernel address space and jumps to the kernel entry point.
-pub fn switch_to_kernel(
-    page_tables: PageTables,
-    mappings: Mappings,
-    boot_info: &'static mut BootInfo,
-) -> ! {
+pub fn switch_to_kernel(page_tables: PageTables, mappings: Mappings, boot_info: VirtAddr) -> ! {
     let PageTables {
         kernel_level_4_frame,
         ..
@@ -637,7 +633,7 @@ unsafe fn context_switch(addresses: Addresses) -> ! {
             in(reg) addresses.page_table.start_address().as_u64(),
             in(reg) addresses.stack_top.as_u64(),
             in(reg) addresses.entry_point.as_u64(),
-            in("rdi") addresses.boot_info as *const _ as usize,
+            in("rdi") addresses.boot_info.as_u64(),
         );
     }
     unreachable!();
@@ -648,7 +644,7 @@ struct Addresses {
     page_table: PhysFrame,
     stack_top: VirtAddr,
     entry_point: VirtAddr,
-    boot_info: &'static mut BootInfo,
+    boot_info: VirtAddr,
 }
 
 fn mapping_addr_page_aligned(

From 94a549df52d9ccc112ce9465aabdcdba36e0eb17 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 11:52:33 +0200
Subject: [PATCH 3/4] don't map boot info and memory map into bootloader's
 memory

Modifying the bootloader's page tables by mapping addresses that we
don't know for sure are available can lead to all sorts of problems.
Simply stop doing that.
---
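Notes: the trick used for the boot info below, distilled. To initialize
memory that is mapped only in the kernel's (inactive) page tables,
translate the page through those tables and write via the bootloader's
identity mapping instead of touching the bootloader's own mappings.
write_into_kernel_space is an illustrative helper, not part of the
patch; it assumes all physical memory is identity mapped and that the
value doesn't cross a page boundary:

    use x86_64::{
        structures::paging::{Mapper, Page, PhysFrame, Size4KiB},
        VirtAddr,
    };

    unsafe fn write_into_kernel_space<T>(
        kernel_table: &impl Mapper<Size4KiB>,
        addr: VirtAddr,
        value: T,
    ) {
        // Ask the *kernel's* tables where this page lives in physical memory.
        let page: Page<Size4KiB> = Page::containing_address(addr);
        let frame: PhysFrame<Size4KiB> = kernel_table
            .translate_page(page)
            .expect("page must be mapped in the kernel's page tables");

        // All physical memory is identity mapped in the bootloader, so the
        // physical address doubles as a pointer we can write through.
        let phys = frame.start_address().as_u64() + (addr.as_u64() & 0xfff);
        (phys as *mut T).write(value);
    }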
 common/src/lib.rs | 92 +++++++++++++++++++++++------------------------
 1 file changed, 45 insertions(+), 47 deletions(-)

diff --git a/common/src/lib.rs b/common/src/lib.rs
index c5f824d4..8a701f23 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -463,10 +463,7 @@ pub struct Mappings {
 
 /// Allocates and initializes the boot info struct and the memory map.
 ///
-/// The boot info and memory map are mapped to both the kernel and bootloader
-/// address space at the same address. This makes it possible to return a Rust
-/// reference that is valid in both address spaces. The necessary physical frames
-/// are taken from the given `frame_allocator`.
+/// The necessary physical frames are taken from the given `frame_allocator`.
 pub fn create_boot_info<I, D>(
     config: &BootloaderConfig,
     boot_config: &BootConfig,
@@ -513,19 +510,15 @@ where
             Ok(tlb) => tlb.flush(),
             Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
         }
-        // we need to be able to access it too
-        match unsafe {
-            page_tables
-                .bootloader
-                .map_to(page, frame, flags, &mut frame_allocator)
-        } {
-            Ok(tlb) => tlb.flush(),
-            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
-        }
     }
 
-    let boot_info: &'static mut MaybeUninit<BootInfo> =
-        unsafe { &mut *boot_info_addr.as_mut_ptr() };
+    let boot_info: &'static mut MaybeUninit<BootInfo> = unsafe {
+        // SAFETY: This is technically UB because the current page tables don't
+        // map `memory_map_regions_addr`, so we have to be careful to not
+        // access any elements.
+        // We have to do this because `BootInfo` needs a `MemoryRegions`.
+        &mut *boot_info_addr.as_mut_ptr()
+    };
 
     log::info!("Create Memory Map");
 
@@ -546,38 +539,43 @@ where
     log::info!("Create bootinfo");
 
     // create boot info
-    let boot_info = boot_info.write({
-        let mut info = BootInfo::new(memory_regions.into());
-        info.framebuffer = mappings
-            .framebuffer
-            .map(|addr| unsafe {
-                FrameBuffer::new(
-                    addr.as_u64(),
-                    system_info
-                        .framebuffer
-                        .expect(
-                            "there shouldn't be a mapping for the framebuffer if there is \
-                            no framebuffer",
-                        )
-                        .info,
-                )
-            })
-            .into();
-        info.physical_memory_offset = mappings.physical_memory_offset.map(VirtAddr::as_u64).into();
-        info.recursive_index = mappings.recursive_index.map(Into::into).into();
-        info.rsdp_addr = system_info.rsdp_addr.map(|addr| addr.as_u64()).into();
-        info.tls_template = mappings.tls_template.into();
-        info.ramdisk_addr = mappings
-            .ramdisk_slice_start
-            .map(|addr| addr.as_u64())
-            .into();
-        info.ramdisk_len = mappings.ramdisk_slice_len;
-        info.kernel_addr = mappings.kernel_slice_start.as_u64();
-        info.kernel_len = mappings.kernel_slice_len as _;
-        info.kernel_image_offset = mappings.kernel_image_offset.as_u64();
-        info._test_sentinel = boot_config._test_sentinel;
-        info
-    });
+    let mut info = BootInfo::new(memory_regions.into());
+    info.framebuffer = mappings
+        .framebuffer
+        .map(|addr| unsafe {
+            FrameBuffer::new(
+                addr.as_u64(),
+                system_info
+                    .framebuffer
+                    .expect(
+                        "there shouldn't be a mapping for the framebuffer if there is \
+                        no framebuffer",
+                    )
+                    .info,
+            )
+        })
+        .into();
+    info.physical_memory_offset = mappings.physical_memory_offset.map(VirtAddr::as_u64).into();
+    info.recursive_index = mappings.recursive_index.map(Into::into).into();
+    info.rsdp_addr = system_info.rsdp_addr.map(|addr| addr.as_u64()).into();
+    info.tls_template = mappings.tls_template.into();
+    info.ramdisk_addr = mappings
+        .ramdisk_slice_start
+        .map(|addr| addr.as_u64())
+        .into();
+    info.ramdisk_len = mappings.ramdisk_slice_len;
+    info.kernel_addr = mappings.kernel_slice_start.as_u64();
+    info.kernel_len = mappings.kernel_slice_len as _;
+    info.kernel_image_offset = mappings.kernel_image_offset.as_u64();
+    info._test_sentinel = boot_config._test_sentinel;
+
+    // Write the boot info directly to the identity-mapped frame.
+    let boot_info_frame = page_tables.kernel.translate_page(start_page).unwrap();
+    assert!(size_of::<BootInfo>() <= Size4KiB::SIZE as usize);
+    let ptr = boot_info_frame.start_address().as_u64() as *mut BootInfo;
+    unsafe {
+        ptr.write(info);
+    }
 
     boot_info_addr
 }

From 177531737b5efbb0b4619dcd1669bb0b1351aa03 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 11:56:42 +0200
Subject: [PATCH 4/4] don't switch to new page tables in bootloader

We don't need to do this anymore because we no longer modify the
bootloader's page tables. This also finally makes it possible to access
all memory mapped by UEFI, not just the memory accessible through the
first PML4 entry. Some UEFI implementations map the frame buffer into
ranges not accessible through the first PML4 entry.
---
 bios/stage-4/src/main.rs |  8 --------
 common/src/lib.rs        |  2 --
 uefi/src/main.rs         | 36 ------------------------------------
 3 files changed, 46 deletions(-)

diff --git a/bios/stage-4/src/main.rs b/bios/stage-4/src/main.rs
index cf159a61..ce597ae2 100644
--- a/bios/stage-4/src/main.rs
+++ b/bios/stage-4/src/main.rs
@@ -221,13 +221,6 @@ fn create_page_tables(frame_allocator: &mut impl FrameAllocator<Size4KiB>) -> Pa
     // We identity-mapped all memory, so the offset between physical and virtual addresses is 0
     let phys_offset = VirtAddr::new(0);
 
-    // copy the currently active level 4 page table, because it might be read-only
-    let bootloader_page_table = {
-        let frame = x86_64::registers::control::Cr3::read().0;
-        let table: *mut PageTable = (phys_offset + frame.start_address().as_u64()).as_mut_ptr();
-        unsafe { OffsetPageTable::new(&mut *table, phys_offset) }
-    };
-
     // create a new page table hierarchy for the kernel
     let (kernel_page_table, kernel_level_4_frame) = {
         // get an unused frame for new level 4 page table
@@ -246,7 +239,6 @@ fn create_page_tables(frame_allocator: &mut impl FrameAllocator<Size4KiB>) -> Pa
     };
 
     PageTables {
-        bootloader: bootloader_page_table,
         kernel: kernel_page_table,
         kernel_level_4_frame,
     }
diff --git a/common/src/lib.rs b/common/src/lib.rs
index 8a701f23..59d20952 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -605,8 +605,6 @@ pub fn switch_to_kernel(page_tables: PageTables, mappings: Mappings, boot_info:
 
 /// Provides access to the page tables of the bootloader and kernel address space.
 pub struct PageTables {
-    /// Provides access to the page tables of the bootloader address space.
-    pub bootloader: OffsetPageTable<'static>,
     /// Provides access to the page tables of the kernel address space (not active).
     pub kernel: OffsetPageTable<'static>,
     /// The physical frame where the level 4 page table of the kernel address space is stored.
diff --git a/uefi/src/main.rs b/uefi/src/main.rs
index 93dfb6c7..7a9444e6 100644
--- a/uefi/src/main.rs
+++ b/uefi/src/main.rs
@@ -389,41 +389,6 @@ fn create_page_tables(
     // UEFI identity-maps all memory, so the offset between physical and virtual addresses is 0
     let phys_offset = VirtAddr::new(0);
 
-    // copy the currently active level 4 page table, because it might be read-only
-    log::trace!("switching to new level 4 table");
-    let bootloader_page_table = {
-        let old_table = {
-            let frame = x86_64::registers::control::Cr3::read().0;
-            let ptr: *const PageTable = (phys_offset + frame.start_address().as_u64()).as_ptr();
-            unsafe { &*ptr }
-        };
-        let new_frame = frame_allocator
-            .allocate_frame()
-            .expect("Failed to allocate frame for new level 4 table");
-        let new_table: &mut PageTable = {
-            let ptr: *mut PageTable =
-                (phys_offset + new_frame.start_address().as_u64()).as_mut_ptr();
-            // create a new, empty page table
-            unsafe {
-                ptr.write(PageTable::new());
-                &mut *ptr
-            }
-        };
-
-        // copy the first entry (we don't need to access more than 512 GiB; also, some UEFI
-        // implementations seem to create an level 4 table entry 0 in all slots)
-        new_table[0] = old_table[0].clone();
-
-        // the first level 4 table entry is now identical, so we can just load the new one
-        unsafe {
-            x86_64::registers::control::Cr3::write(
-                new_frame,
-                x86_64::registers::control::Cr3Flags::empty(),
-            );
-            OffsetPageTable::new(&mut *new_table, phys_offset)
-        }
-    };
-
     // create a new page table hierarchy for the kernel
     let (kernel_page_table, kernel_level_4_frame) = {
         // get an unused frame for new level 4 page table
@@ -442,7 +407,6 @@ fn create_page_tables(
     };
 
     bootloader_x86_64_common::PageTables {
-        bootloader: bootloader_page_table,
         kernel: kernel_page_table,
         kernel_level_4_frame,
     }