fix(paging): translating addresses

Mahdi Dibaiee 2016-07-11 10:43:02 +04:30
parent 23fe830220
commit f4a8ca7261
4 changed files with 130 additions and 18 deletions

View File

@@ -1,9 +1,9 @@
-mod paging;
-mod area_frame_allocator;
pub use self::area_frame_allocator::AreaFrameAllocator;
use self::paging::PhysicalAddress;
+mod area_frame_allocator;
+mod paging;
pub const PAGE_SIZE: usize = 4096;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
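
Aside (not part of this commit): PAGE_SIZE is what ties frame numbers to physical addresses, and the translation code added below leans on exactly this arithmetic. A minimal sketch, with helper names of my own:

const PAGE_SIZE: usize = 4096;

// Frame number containing a physical address, and back again.
fn frame_containing(physical_address: usize) -> usize {
    physical_address / PAGE_SIZE // e.g. 0xb8000 -> frame 0xb8
}

fn frame_start(frame_number: usize) -> usize {
    frame_number * PAGE_SIZE // frame 0xb8 -> 0xb8000
}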

View File

@@ -1,4 +1,5 @@
+use memory::PAGE_SIZE;
use memory::Frame;
pub struct Entry(u64);
@@ -26,7 +27,7 @@ impl Entry {
    }

    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
-        assert!(frame.start_address() & !x000fffff_fffff000 == 0);
+        assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
        self.0 = (frame.start_address() as u64) | flags.bits();
    }
}
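
Aside: the mask in the fixed assert mirrors the x86_64 page-table entry layout, where bits 12-51 hold the page-aligned frame address and the rest are flag bits, so a frame that is not page-aligned or does not fit in 52 bits trips the assert. A hedged sketch of splitting an entry back apart (the helper is hypothetical; the mask is the one above):

const ADDRESS_MASK: u64 = 0x000fffff_fffff000;

// Split a raw entry into (frame address, flag bits).
fn decompose(entry: u64) -> (u64, u64) {
    (entry & ADDRESS_MASK, entry & !ADDRESS_MASK)
}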

View File

@@ -1,8 +1,73 @@
mod entry;
mod table;

use memory::PAGE_SIZE;
use memory::Frame;
pub use self::entry::*;
use memory::FrameAllocator;
use self::table::*;

const ENTRY_COUNT: usize = 512;

pub fn map_to<A>(page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
    where A: FrameAllocator
{
    let p4 = unsafe { &mut *P4 };
    let mut p3 = p4.next_table_create(page.p4_index(), allocator);
    let mut p2 = p3.next_table_create(page.p3_index(), allocator);
    let mut p1 = p2.next_table_create(page.p2_index(), allocator);

    assert!(p1[page.p1_index()].is_unused());
    p1[page.p1_index()].set(frame, flags | PRESENT);
}
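
Aside, a usage sketch for map_to (the demo function and the address are invented; WRITABLE comes from the entry flags, and map_to ORs in PRESENT itself):

fn demo<A: FrameAllocator>(allocator: &mut A) {
    // 0xdeadbeaf sits in the lower canonical half, so containing_address accepts it.
    let page = Page::containing_address(0xdeadbeaf);
    let frame = allocator.allocate_frame().expect("no frames available");
    map_to(page, frame, WRITABLE, allocator);
}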
pub fn translate(virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
    // offset within the page: the address modulo the page size
    let offset = virtual_address % PAGE_SIZE;
    translate_page(Page::containing_address(virtual_address))
        .map(|frame| frame.number * PAGE_SIZE + offset)
}
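
Aside, a worked example with invented numbers: for virtual_address = 0x201234, the offset is 0x234 and the page number is 0x201; if translate_page resolves that page to frame 0x42, the physical address is 0x42 * 4096 + 0x234:

fn translate_example() {
    let virtual_address: usize = 0x201234;
    let offset = virtual_address % 4096;      // 0x234
    let page_number = virtual_address / 4096; // 0x201
    let frame_number = 0x42;                  // pretend translate_page said so
    assert_eq!(page_number, 0x201);
    assert_eq!(frame_number * 4096 + offset, 0x42234);
}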
pub fn translate_page(page: Page) -> Option<Frame> {
    use self::entry::HUGE_PAGE;

    let p3 = unsafe { &*table::P4 }.next_table(page.p4_index());

    let huge_page = || {
        p3.and_then(|p3| {
            let p3_entry = &p3[page.p3_index()];
            // 1GiB page?
            if let Some(start_frame) = p3_entry.pointed_frame() {
                if p3_entry.flags().contains(HUGE_PAGE) {
                    // address must be 1GiB aligned
                    assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
                    return Some(Frame {
                        number: start_frame.number + page.p2_index() * ENTRY_COUNT +
                                page.p1_index(),
                    });
                }
            }
            if let Some(p2) = p3.next_table(page.p3_index()) {
                let p2_entry = &p2[page.p2_index()];
                // 2MiB page?
                if let Some(start_frame) = p2_entry.pointed_frame() {
                    if p2_entry.flags().contains(HUGE_PAGE) {
                        // address must be 2MiB aligned
                        assert!(start_frame.number % ENTRY_COUNT == 0);
                        return Some(Frame { number: start_frame.number + page.p1_index() });
                    }
                }
            }
            None
        })
    };

    p3.and_then(|p3| p3.next_table(page.p3_index()))
        .and_then(|p2| p2.next_table(page.p2_index()))
        .and_then(|p1| p1[page.p1_index()].pointed_frame())
        .or_else(huge_page)
}
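
Aside: the huge-page arithmetic follows from ENTRY_COUNT = 512. A 1GiB P3 entry covers 512 * 512 = 262144 4KiB frames and a 2MiB P2 entry covers 512, so the asserts check alignment and the remaining low-level indices are added onto the start frame. A tiny check with invented numbers:

fn huge_page_example() {
    const ENTRY_COUNT: usize = 512;
    // 2MiB page starting at frame 512 (2MiB aligned), with p1_index() = 3:
    let start_frame = 512;
    assert!(start_frame % ENTRY_COUNT == 0);
    assert_eq!(start_frame + 3, 515); // the frame translate_page would return
    // a 1GiB page must start on a multiple of 512 * 512 frames:
    assert_eq!(ENTRY_COUNT * ENTRY_COUNT, 262144);
}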
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
@@ -10,6 +75,37 @@ pub struct Page {
    number: usize,
}

impl Page {
    pub fn containing_address(address: VirtualAddress) -> Page {
        assert!(address < 0x0000_8000_0000_0000 ||
                address >= 0xffff_8000_0000_0000,
                "invalid address: 0x{:x}", address);
        Page { number: address / PAGE_SIZE }
    }

    fn start_address(&self) -> usize {
        self.number * PAGE_SIZE
    }

    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }

    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }

    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }

    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }
}
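
Aside, a quick sanity check of the 9-bit index slicing, in octal so the groups line up (the page number is invented, and building Page literally like this only works inside this module):

fn index_demo() {
    let page = Page { number: 0o377_776_775_774 };
    assert_eq!(page.p4_index(), 0o377);
    assert_eq!(page.p3_index(), 0o776);
    assert_eq!(page.p2_index(), 0o775);
    assert_eq!(page.p1_index(), 0o774);
}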
/*
pub struct Entry(u64);
impl Entry {
@@ -55,3 +151,4 @@ bitflags! {
const NO_EXECUTE = 1 << 63,
}
}
*/

View File

@@ -1,5 +1,6 @@
use memory::paging::entry::*;
use memory::paging::ENTRY_COUNT;
+use memory::FrameAllocator;
use core::ops::{Index, IndexMut};
use core::marker::PhantomData;
@@ -7,7 +8,9 @@ pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
pub trait TableLevel {}

pub enum Level4 {}
+#[allow(dead_code)]
pub enum Level3 {}
+#[allow(dead_code)]
pub enum Level2 {}
pub enum Level1 {}
@@ -16,18 +19,19 @@ impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
-    type NextLevel: Level3
+    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
-    type NextLevel: Level2
+    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
-    type NextLevel: Level1
+    type NextLevel = Level1;
}
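
Aside: this associated-type ladder is what lets next_table return a table of the next lower level, and the deliberate absence of a HierarchicalLevel impl for Level1 turns "next table of a P1" into a compile error instead of a runtime bug. The same pattern in isolation, with hypothetical names:

trait Descend { type Next; }
enum Upper {}
enum Lowest {}
impl Descend for Upper { type Next = Lowest; }
// No impl for Lowest: code generic over `Descend` simply cannot go below it.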
pub struct Table<L: TableLevel> {
@@ -64,6 +68,23 @@ impl<L> Table<L> where L: HierarchicalLevel {
        self.next_table_address(index)
            .map(|address| unsafe { &mut *(address as *mut _) })
    }

    pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A)
        -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, PRESENT | WRITABLE);
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }
}
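
Aside, a sketch of what next_table_create gives the mapping code, assuming a working allocator (the function is hypothetical; this is essentially map_to's walk with fixed indices):

fn ensure_path<A: FrameAllocator>(allocator: &mut A) -> bool {
    let p4 = unsafe { &mut *P4 };
    let p1 = p4.next_table_create(0, allocator) // creates and zeroes a P3 if absent
        .next_table_create(0, allocator)        // then a P2
        .next_table_create(0, allocator);       // then a P1
    p1[0].is_unused()
}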
impl<L> Index<usize> for Table<L> where L: TableLevel {
@@ -80,10 +101,3 @@ impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    }
}

-fn test() {
-    let p4 = unsafe { &*P4 };
-    p4.next_table(42)
-        .and_then(|p3| p3.next_table(1337))
-        .and_then(|p2| p2.next_table(0xdeadbeaf))
-        .and_then(|p1| p1.next_table(0xcafebabe));
-}