Snap for 8992082 from 34c728d69ef12bef4f675ed7d22df3b93cfe4dc3 to gki13-boot-release

Change-Id: I4f1a17fee4b2686dc5c755e1b280a0ac77f52971
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 6eae027..6abae94 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
 {
   "git": {
-    "sha1": "c5c869961318cc95a5fb5e61a7da0783fea04510"
+    "sha1": "7cf4bc2d66a3edb354c1bb15c6c5ef7de518082d"
   },
   "path_in_vcs": ""
 }
\ No newline at end of file
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 769c845..a65965c 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -24,6 +24,8 @@
         run: cargo build --target=aarch64-unknown-none
       - name: Run tests
         run: cargo test
+      - name: Run tests without default features
+        run: cargo test --no-default-features
       - name: Run clippy
         uses: actions-rs/clippy-check@v1
         with:
diff --git a/Android.bp b/Android.bp
index 19fa983..874f6a9 100644
--- a/Android.bp
+++ b/Android.bp
@@ -45,7 +45,7 @@
     host_supported: true,
     crate_name: "aarch64_paging",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.2.1",
+    cargo_pkg_version: "0.3.0",
     srcs: ["src/lib.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
@@ -53,6 +53,10 @@
         unit_test: true,
     },
     edition: "2021",
+    features: [
+        "alloc",
+        "default",
+    ],
     rustlibs: [
         "libbitflags",
     ],
@@ -69,9 +73,13 @@
     host_supported: true,
     crate_name: "aarch64_paging",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.2.1",
+    cargo_pkg_version: "0.3.0",
     srcs: ["src/lib.rs"],
     edition: "2021",
+    features: [
+        "alloc",
+        "default",
+    ],
     rustlibs: [
         "libbitflags",
     ],
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..1734e71
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,55 @@
+# Changelog
+
+## 0.3.0
+
+### Breaking changes
+
+- Made `Translation` trait responsible for allocating page tables. This should help make it possible
+  to use more complex mapping schemes, and to construct page tables in a different context to where
+  they are used.
+- Renamed `AddressRangeError` to `MapError`, which is now an enum with three variants and implements
+  `Display`.
+- `From<*const T>` and `From<*mut T>` are no longer implemented for `VirtualAddress`.
+- Added support for using TTBR1 as well as TTBR0; this changes various constructors to take an extra
+  parameter.
+
+### New features
+
+- Made `alloc` dependency optional via a feature flag.
+- Added support for linear mappings with new `LinearMap`.
+- Implemented subtraction of usize from address types.
+
+### Bugfixes
+
+- Fixed memory leak introduced in 0.2.0: dropping a page table will now actually free its memory.
+
+## 0.2.1
+
+### New features
+
+- Implemented `Debug` and `Display` for `MemoryRegion`.
+- Implemented `From<Range<VirtualAddress>>` for `MemoryRegion`.
+- Implemented arithmetic operations for `PhysicalAddress` and `VirtualAddress`.
+
+## 0.2.0
+
+### Breaking changes
+
+- Added bounds check to `IdMap::map_range`; it will now return an error if you attempt to map a
+  virtual address outside the range of the page table given its configured root level.
+
+### New features
+
+- Implemented `Debug` for `PhysicalAddress` and `VirtualAddress`.
+- Validate that chosen root level is supported.
+
+### Bugfixes
+
+- Fixed bug in `Display` and `Drop` implementation for `RootTable` that would result in a crash for
+  any pagetable with non-zero mappings.
+- Fixed `Display` implementation for `PhysicalAddress` and `VirtualAddress` to use correct number of
+  digits.
+
+## 0.1.0
+
+Initial release.
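The 0.3.0 entries above correspond to the new `src/linearmap.rs` added later in this patch. As a
rough sketch of the new linear-mapping API (the ASID, root level, offset and addresses here are
arbitrary example values, not taken from the imported crate):

```rust
use aarch64_paging::{
    linearmap::LinearMap,
    paging::{Attributes, MemoryRegion, VaRange},
};

// ASID 1, root level 1, and an offset of one page: virtual address `va` maps to IPA `va + 0x1000`.
// The offset must be a multiple of the page size, otherwise `LinearMap::new` panics.
let mut pagetable = LinearMap::new(1, 1, 0x1000, VaRange::Lower);

// Map 2 MiB starting at VA 0x8000_0000 (so IPA 0x8000_1000 onwards) as normal memory.
pagetable
    .map_range(
        &MemoryRegion::new(0x8000_0000, 0x8020_0000),
        Attributes::NORMAL,
    )
    .expect("range fits in the 512 GiB covered by a level 1 root table");

// On aarch64 the table could then be installed into TTBR0_EL1:
#[cfg(target_arch = "aarch64")]
pagetable.activate();
```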
diff --git a/Cargo.toml b/Cargo.toml
index 894c161..e419364 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
 [package]
 edition = "2021"
 name = "aarch64-paging"
-version = "0.2.1"
+version = "0.3.0"
 authors = [
     "Ard Biesheuvel <ardb@google.com>",
     "Andrew Walbran <qwandor@google.com>",
@@ -35,7 +35,12 @@
 resolver = "2"
 
 [package.metadata.docs.rs]
+all-features = true
 default-target = "aarch64-unknown-none"
 
 [dependencies.bitflags]
 version = "1.3.2"
+
+[features]
+alloc = []
+default = ["alloc"]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 74d0137..ca1e9ec 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
 [package]
 name = "aarch64-paging"
-version = "0.2.1"
+version = "0.3.0"
 edition = "2021"
 license = "MIT OR Apache-2.0"
 description = "A library to manipulate AArch64 VMSA EL1 page tables."
@@ -12,5 +12,10 @@
 [dependencies]
 bitflags = "1.3.2"
 
+[features]
+default = ["alloc"]
+alloc = []
+
 [package.metadata.docs.rs]
+all-features = true
 default-target = "aarch64-unknown-none"
diff --git a/METADATA b/METADATA
index 3ae9d2a..701e31a 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@
   }
   url {
     type: ARCHIVE
-    value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.2.1.crate"
+    value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.3.0.crate"
   }
-  version: "0.2.1"
+  version: "0.3.0"
   license_type: NOTICE
   last_upgrade_date {
     year: 2022
-    month: 6
-    day: 29
+    month: 8
+    day: 15
   }
 }
diff --git a/src/idmap.rs b/src/idmap.rs
index 63918df..06455ed 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -3,17 +3,52 @@
 // See LICENSE-APACHE and LICENSE-MIT for details.
 
 //! Functionality for managing page tables with identity mapping.
+//!
+//! See [`IdMap`] for details on how to use it.
 
 use crate::{
-    paging::{Attributes, MemoryRegion, PhysicalAddress, RootTable, Translation, VirtualAddress},
-    AddressRangeError,
+    paging::{
+        deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation, VaRange,
+        VirtualAddress,
+    },
+    MapError, Mapping,
 };
-#[cfg(target_arch = "aarch64")]
-use core::arch::asm;
+use core::ptr::NonNull;
 
-/// Manages a level 1 page-table using identity mapping, where every virtual address is either
+/// Identity mapping, where every virtual address is either unmapped or mapped to the identical IPA.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct IdTranslation;
+
+impl IdTranslation {
+    fn virtual_to_physical(va: VirtualAddress) -> PhysicalAddress {
+        PhysicalAddress(va.0)
+    }
+}
+
+impl Translation for IdTranslation {
+    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
+        let table = PageTable::new();
+
+        // Physical address is the same as the virtual address because we are using identity mapping
+        // everywhere.
+        (table, PhysicalAddress(table.as_ptr() as usize))
+    }
+
+    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
+        deallocate(page_table);
+    }
+
+    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
+        NonNull::new(pa.0 as *mut PageTable).expect("Got physical address 0 for pagetable")
+    }
+}
+
+/// Manages a level 1 page table using identity mapping, where every virtual address is either
 /// unmapped or mapped to the identical IPA.
 ///
+/// This assumes that identity mapping is used both for the page table being managed, and for code
+/// that is managing it.
+///
 /// Mappings should be added with [`map_range`](Self::map_range) before calling
 /// [`activate`](Self::activate) to start using the new page table. To make changes which may
 /// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
@@ -57,30 +92,14 @@
 /// ```
 #[derive(Debug)]
 pub struct IdMap {
-    root: RootTable<IdMap>,
-    #[allow(unused)]
-    asid: usize,
-    #[allow(unused)]
-    previous_ttbr: Option<usize>,
-}
-
-impl Translation for IdMap {
-    fn virtual_to_physical(va: VirtualAddress) -> PhysicalAddress {
-        PhysicalAddress(va.0)
-    }
-
-    fn physical_to_virtual(pa: PhysicalAddress) -> VirtualAddress {
-        VirtualAddress(pa.0)
-    }
+    mapping: Mapping<IdTranslation>,
 }
 
 impl IdMap {
     /// Creates a new identity-mapping page table with the given ASID and root level.
-    pub fn new(asid: usize, rootlevel: usize) -> IdMap {
-        IdMap {
-            root: RootTable::new(rootlevel),
-            asid,
-            previous_ttbr: None,
+    pub fn new(asid: usize, rootlevel: usize) -> Self {
+        Self {
+            mapping: Mapping::new(IdTranslation, asid, rootlevel, VaRange::Lower),
         }
     }
 
@@ -91,23 +110,7 @@
     /// `deactivate`.
     #[cfg(target_arch = "aarch64")]
     pub fn activate(&mut self) {
-        assert!(self.previous_ttbr.is_none());
-
-        let mut previous_ttbr;
-        unsafe {
-            // Safe because we trust that self.root.to_physical() returns a valid physical address
-            // of a page table, and the `Drop` implementation will reset `TTRB0_EL1` before it
-            // becomes invalid.
-            asm!(
-                "mrs   {previous_ttbr}, ttbr0_el1",
-                "msr   ttbr0_el1, {ttbrval}",
-                "isb",
-                ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
-                previous_ttbr = out(reg) previous_ttbr,
-                options(preserves_flags),
-            );
-        }
-        self.previous_ttbr = Some(previous_ttbr);
+        self.mapping.activate()
     }
 
     /// Deactivates the page table, by setting `TTBR0_EL1` back to the value it had before
@@ -118,21 +121,7 @@
     /// called.
     #[cfg(target_arch = "aarch64")]
     pub fn deactivate(&mut self) {
-        unsafe {
-            // Safe because this just restores the previously saved value of `TTBR0_EL1`, which must
-            // have been valid.
-            asm!(
-                "msr   ttbr0_el1, {ttbrval}",
-                "isb",
-                "tlbi  aside1, {asid}",
-                "dsb   nsh",
-                "isb",
-                asid = in(reg) self.asid << 48,
-                ttbrval = in(reg) self.previous_ttbr.unwrap(),
-                options(preserves_flags),
-            );
-        }
-        self.previous_ttbr = None;
+        self.mapping.deactivate()
     }
 
     /// Maps the given range of virtual addresses to the identical physical addresses with the given
@@ -142,34 +131,24 @@
     /// change that may require break-before-make per the architecture must be made while the page
     /// table is inactive. Mapping a previously unmapped memory range may be done while the page
     /// table is active.
-    pub fn map_range(
-        &mut self,
-        range: &MemoryRegion,
-        flags: Attributes,
-    ) -> Result<(), AddressRangeError> {
-        self.root.map_range(range, flags)?;
-        #[cfg(target_arch = "aarch64")]
-        unsafe {
-            // Safe because this is just a memory barrier.
-            asm!("dsb ishst");
-        }
-        Ok(())
-    }
-}
-
-impl Drop for IdMap {
-    fn drop(&mut self) {
-        if self.previous_ttbr.is_some() {
-            #[cfg(target_arch = "aarch64")]
-            self.deactivate();
-        }
+    ///
+    /// # Errors
+    ///
+    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+    /// largest virtual address covered by the page table given its root level.
+    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
+        let pa = IdTranslation::virtual_to_physical(range.start());
+        self.mapping.map_range(range, pa, flags)
     }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::paging::PAGE_SIZE;
+    use crate::{
+        paging::{Attributes, MemoryRegion, PAGE_SIZE},
+        MapError,
+    };
 
     const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
 
@@ -202,6 +181,16 @@
             Ok(())
         );
 
+        // Two pages, on the boundary between two subtables.
+        let mut idmap = IdMap::new(1, 1);
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
         // The entire valid address space.
         let mut idmap = IdMap::new(1, 1);
         assert_eq!(
@@ -226,7 +215,9 @@
                 ),
                 Attributes::NORMAL
             ),
-            Err(AddressRangeError)
+            Err(MapError::AddressRange(VirtualAddress(
+                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
+            )))
         );
 
         // From 0 to just past the valid range.
@@ -235,7 +226,9 @@
                 &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,),
                 Attributes::NORMAL
             ),
-            Err(AddressRangeError)
+            Err(MapError::AddressRange(VirtualAddress(
+                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
+            )))
         );
     }
 }
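With `AddressRangeError` replaced by the richer `MapError` enum (added to `src/lib.rs` below),
callers of `IdMap::map_range` can now tell the failure modes apart. A minimal sketch of downstream
error handling; the `map_one_page` helper, its ASID/root level and its return type are purely
illustrative:

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion, PAGE_SIZE},
    MapError,
};

/// Identity-maps a single page and reports which kind of failure occurred, if any.
fn map_one_page(base: usize) -> Result<(), &'static str> {
    let mut idmap = IdMap::new(1, 1);
    idmap
        .map_range(
            &MemoryRegion::new(base, base + PAGE_SIZE),
            Attributes::NORMAL,
        )
        .map_err(|e| match e {
            // The page lies above what a level 1 root table can address (512 GiB).
            MapError::AddressRange(_) => "address out of range for root level 1",
            // Cannot happen for identity mapping, but the variant must still be covered.
            MapError::InvalidVirtualAddress(_) => "virtual address not valid for this mapping",
            // The end of the region was below its start.
            MapError::RegionBackwards(_) => "backwards memory region",
        })
}
```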
diff --git a/src/lib.rs b/src/lib.rs
index 40abedd..80cee63 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,13 +9,15 @@
 //!   - EL1
 //!   - 4 KiB pages
 //!
-//! Full support is only provided for identity mapping; for other mapping schemes the user of the
-//! library must implement some functionality themself including an implementation of the
-//! [`Translation`](paging::Translation) trait.
+//! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
+//! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
+//! provide an implementation of the [`Translation`](paging::Translation) trait and then use
+//! [`Mapping`] directly.
 //!
 //! # Example
 //!
 //! ```
+//! # #[cfg(feature = "alloc")] {
 //! use aarch64_paging::{
 //!     idmap::IdMap,
 //!     paging::{Attributes, MemoryRegion},
@@ -34,14 +36,180 @@
 //! // Set `TTBR0_EL1` to activate the page table.
 //! # #[cfg(target_arch = "aarch64")]
 //! idmap.activate();
+//! # }
 //! ```
 
 #![no_std]
 
+#[cfg(feature = "alloc")]
 pub mod idmap;
+#[cfg(feature = "alloc")]
+pub mod linearmap;
 pub mod paging;
 
+#[cfg(feature = "alloc")]
 extern crate alloc;
 
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub struct AddressRangeError;
+#[cfg(target_arch = "aarch64")]
+use core::arch::asm;
+use core::fmt::{self, Display, Formatter};
+use paging::{
+    Attributes, MemoryRegion, PhysicalAddress, RootTable, Translation, VaRange, VirtualAddress,
+};
+
+/// An error attempting to map some range in the page table.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum MapError {
+    /// The address requested to be mapped was out of the range supported by the page table
+    /// configuration.
+    AddressRange(VirtualAddress),
+    /// The address requested to be mapped was not valid for the mapping in use.
+    InvalidVirtualAddress(VirtualAddress),
+    /// The end of the memory region is before the start.
+    RegionBackwards(MemoryRegion),
+}
+
+impl Display for MapError {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            Self::AddressRange(va) => write!(f, "Virtual address {} out of range", va),
+            Self::InvalidVirtualAddress(va) => {
+                write!(f, "Invalid virtual address {} for mapping", va)
+            }
+            Self::RegionBackwards(region) => {
+                write!(f, "End of memory region {} is before start.", region)
+            }
+        }
+    }
+}
+
+/// Manages a level 1 page table and associated state.
+///
+/// Mappings should be added with [`map_range`](Self::map_range) before calling
+/// [`activate`](Self::activate) to start using the new page table. To make changes which may
+/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
+/// switch back to a previous static page table, and then `activate` again after making the desired
+/// changes.
+#[derive(Debug)]
+pub struct Mapping<T: Translation + Clone> {
+    root: RootTable<T>,
+    #[allow(unused)]
+    asid: usize,
+    #[allow(unused)]
+    previous_ttbr: Option<usize>,
+}
+
+impl<T: Translation + Clone> Mapping<T> {
+    /// Creates a new page table with the given ASID, root level and translation mapping.
+    pub fn new(translation: T, asid: usize, rootlevel: usize, va_range: VaRange) -> Self {
+        Self {
+            root: RootTable::new(translation, rootlevel, va_range),
+            asid,
+            previous_ttbr: None,
+        }
+    }
+
+    /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
+    /// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
+    ///
+    /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
+    /// `deactivate`.
+    #[cfg(target_arch = "aarch64")]
+    pub fn activate(&mut self) {
+        assert!(self.previous_ttbr.is_none());
+
+        let mut previous_ttbr;
+        unsafe {
+            // Safe because we trust that self.root.to_physical() returns a valid physical address
+            // of a page table, and the `Drop` implementation will reset `TTBRn_EL1` before it
+            // becomes invalid.
+            match self.root.va_range() {
+                VaRange::Lower => asm!(
+                    "mrs   {previous_ttbr}, ttbr0_el1",
+                    "msr   ttbr0_el1, {ttbrval}",
+                    "isb",
+                    ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
+                    previous_ttbr = out(reg) previous_ttbr,
+                    options(preserves_flags),
+                ),
+                VaRange::Upper => asm!(
+                    "mrs   {previous_ttbr}, ttbr1_el1",
+                    "msr   ttbr1_el1, {ttbrval}",
+                    "isb",
+                    ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
+                    previous_ttbr = out(reg) previous_ttbr,
+                    options(preserves_flags),
+                ),
+            }
+        }
+        self.previous_ttbr = Some(previous_ttbr);
+    }
+
+    /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
+    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
+    /// configured ASID.
+    ///
+    /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
+    /// called.
+    #[cfg(target_arch = "aarch64")]
+    pub fn deactivate(&mut self) {
+        unsafe {
+            // Safe because this just restores the previously saved value of `TTBRn_EL1`, which must
+            // have been valid.
+            match self.root.va_range() {
+                VaRange::Lower => asm!(
+                    "msr   ttbr0_el1, {ttbrval}",
+                    "isb",
+                    "tlbi  aside1, {asid}",
+                    "dsb   nsh",
+                    "isb",
+                    asid = in(reg) self.asid << 48,
+                    ttbrval = in(reg) self.previous_ttbr.unwrap(),
+                    options(preserves_flags),
+                ),
+                VaRange::Upper => asm!(
+                    "msr   ttbr1_el1, {ttbrval}",
+                    "isb",
+                    "tlbi  aside1, {asid}",
+                    "dsb   nsh",
+                    "isb",
+                    asid = in(reg) self.asid << 48,
+                    ttbrval = in(reg) self.previous_ttbr.unwrap(),
+                    options(preserves_flags),
+                ),
+            }
+        }
+        self.previous_ttbr = None;
+    }
+
+    /// Maps the given range of virtual addresses to the corresponding range of physical addresses
+    /// starting at `pa`, with the given flags.
+    ///
+    /// This should generally only be called while the page table is not active. In particular, any
+    /// change that may require break-before-make per the architecture must be made while the page
+    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+    /// table is active.
+    pub fn map_range(
+        &mut self,
+        range: &MemoryRegion,
+        pa: PhysicalAddress,
+        flags: Attributes,
+    ) -> Result<(), MapError> {
+        self.root.map_range(range, pa, flags)?;
+        #[cfg(target_arch = "aarch64")]
+        unsafe {
+            // Safe because this is just a memory barrier.
+            asm!("dsb ishst");
+        }
+        Ok(())
+    }
+}
+
+impl<T: Translation + Clone> Drop for Mapping<T> {
+    fn drop(&mut self) {
+        if self.previous_ttbr.is_some() {
+            #[cfg(target_arch = "aarch64")]
+            self.deactivate();
+        }
+    }
+}
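The updated module documentation above says that any other mapping scheme needs a custom
`Translation` implementation used together with `Mapping` directly. A minimal sketch of that route;
`MyIdTranslation`, `example` and the mapped addresses are invented for illustration, and the
allocation strategy simply mirrors what the built-in translations do (the global allocator via
`PageTable::new`):

```rust
extern crate alloc;

use aarch64_paging::{
    paging::{Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation, VaRange},
    Mapping,
};
use core::ptr::NonNull;

/// An identity translation backed by the global allocator, equivalent in spirit to the crate's
/// own `IdTranslation`, written out here to show the shape of the new trait.
#[derive(Clone)]
struct MyIdTranslation;

impl Translation for MyIdTranslation {
    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
        let table = PageTable::new();
        // Identity mapping: the table's virtual address is also its physical address.
        (table, PhysicalAddress(table.as_ptr() as usize))
    }

    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
        // Free with the same layout that `PageTable::new` allocated with.
        alloc::alloc::dealloc(
            page_table.as_ptr() as *mut u8,
            core::alloc::Layout::new::<PageTable>(),
        );
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
        NonNull::new(pa.0 as *mut PageTable).expect("got physical address 0 for a page table")
    }
}

fn example() -> Mapping<MyIdTranslation> {
    let mut mapping = Mapping::new(MyIdTranslation, 1, 1, VaRange::Lower);
    // Unlike IdMap::map_range, the caller passes the physical address explicitly.
    mapping
        .map_range(
            &MemoryRegion::new(0x8000_0000, 0x8020_0000),
            PhysicalAddress(0x8000_0000),
            Attributes::NORMAL,
        )
        .unwrap();
    mapping
}
```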
diff --git a/src/linearmap.rs b/src/linearmap.rs
new file mode 100644
index 0000000..7dd7c09
--- /dev/null
+++ b/src/linearmap.rs
@@ -0,0 +1,414 @@
+// Copyright 2022 The aarch64-paging Authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Functionality for managing page tables with linear mapping.
+//!
+//! See [`LinearMap`] for details on how to use it.
+
+use crate::{
+    paging::{
+        deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation,
+        VaRange, VirtualAddress, PAGE_SIZE,
+    },
+    MapError, Mapping,
+};
+use core::ptr::NonNull;
+
+/// Linear mapping, where every virtual address is either unmapped or mapped to an IPA with a fixed
+/// offset.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct LinearTranslation {
+    /// The offset from a virtual address to the corresponding (intermediate) physical address.
+    offset: isize,
+}
+
+impl LinearTranslation {
+    /// Constructs a new linear translation, which will map a virtual address `va` to the
+    /// (intermediate) physical address `va + offset`.
+    ///
+    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
+    pub fn new(offset: isize) -> Self {
+        if !is_aligned(offset.unsigned_abs(), PAGE_SIZE) {
+            panic!(
+                "Invalid offset {}, must be a multiple of page size {}.",
+                offset, PAGE_SIZE,
+            );
+        }
+        Self { offset }
+    }
+
+    fn virtual_to_physical(&self, va: VirtualAddress) -> Result<PhysicalAddress, MapError> {
+        if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) {
+            Ok(PhysicalAddress(pa))
+        } else {
+            Err(MapError::InvalidVirtualAddress(va))
+        }
+    }
+}
+
+impl Translation for LinearTranslation {
+    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
+        let table = PageTable::new();
+        // Assume that the same linear mapping is used everywhere.
+        let va = VirtualAddress(table.as_ptr() as usize);
+
+        let pa = self.virtual_to_physical(va).expect(
+            "Allocated subtable with virtual address which doesn't correspond to any physical address."
+        );
+        (table, pa)
+    }
+
+    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
+        deallocate(page_table);
+    }
+
+    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
+        let signed_pa = pa.0 as isize;
+        if signed_pa < 0 {
+            panic!("Invalid physical address {} for pagetable", pa);
+        }
+        if let Some(va) = signed_pa.checked_sub(self.offset) {
+            if let Some(ptr) = NonNull::new(va as *mut PageTable) {
+                ptr
+            } else {
+                panic!(
+                    "Invalid physical address {} for pagetable (translated to virtual address 0)",
+                    pa
+                )
+            }
+        } else {
+            panic!("Invalid physical address {} for pagetable", pa);
+        }
+    }
+}
+
+/// Adds two signed values, returning an unsigned result, or `None` on overflow or a negative sum.
+fn checked_add_to_unsigned(a: isize, b: isize) -> Option<usize> {
+    a.checked_add(b)?.try_into().ok()
+}
+
+/// Manages a level 1 page table using linear mapping, where every virtual address is either
+/// unmapped or mapped to an IPA with a fixed offset.
+///
+/// This assumes that the same linear mapping is used both for the page table being managed, and for
+/// code that is managing it.
+#[derive(Debug)]
+pub struct LinearMap {
+    mapping: Mapping<LinearTranslation>,
+}
+
+impl LinearMap {
+    /// Creates a new linear-mapping page table with the given ASID, root level and offset, for
+    /// use in the given TTBR.
+    ///
+    /// This will map any virtual address `va` which is added to the table to the physical address
+    /// `va + offset`.
+    ///
+    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
+    pub fn new(asid: usize, rootlevel: usize, offset: isize, va_range: VaRange) -> Self {
+        Self {
+            mapping: Mapping::new(LinearTranslation::new(offset), asid, rootlevel, va_range),
+        }
+    }
+
+    /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous
+    /// value of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
+    ///
+    /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
+    /// `deactivate`.
+    #[cfg(target_arch = "aarch64")]
+    pub fn activate(&mut self) {
+        self.mapping.activate()
+    }
+
+    /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
+    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
+    /// configured ASID.
+    ///
+    /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
+    /// called.
+    #[cfg(target_arch = "aarch64")]
+    pub fn deactivate(&mut self) {
+        self.mapping.deactivate()
+    }
+
+    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
+    /// given flags.
+    ///
+    /// This should generally only be called while the page table is not active. In particular, any
+    /// change that may require break-before-make per the architecture must be made while the page
+    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+    /// table is active.
+    ///
+    /// # Errors
+    ///
+    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
+    /// address within the `range` would result in overflow.
+    ///
+    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+    /// largest virtual address covered by the page table given its root level.
+    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
+        let pa = self
+            .mapping
+            .root
+            .translation()
+            .virtual_to_physical(range.start())?;
+        self.mapping.map_range(range, pa, flags)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
+        MapError,
+    };
+
+    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
+    const GIB_512_S: isize = 512 * 1024 * 1024 * 1024;
+    const GIB_512: usize = 512 * 1024 * 1024 * 1024;
+
+    #[test]
+    fn map_valid() {
+        // A single byte at the start of the address space.
+        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+            Ok(())
+        );
+
+        // Two pages at the start of the address space.
+        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+            Ok(())
+        );
+
+        // A single byte at the end of the address space.
+        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
+                ),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
+        // The entire valid address space. Use an offset that is a multiple of the level 2 block
+        // size to avoid mapping everything as pages as that is really slow.
+        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
+        let mut pagetable = LinearMap::new(1, 1, LEVEL_2_BLOCK_SIZE as isize, VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+    }
+
+    #[test]
+    fn map_valid_negative_offset() {
+        // A single byte which maps to IPA 0.
+        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
+        // Two pages at the start of the address space.
+        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
+        // A single byte at the end of the address space.
+        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
+                ),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
+        // The entire valid address space. Use an offset that is a multiple of the level 2 block
+        // size to avoid mapping everything as pages as that is really slow.
+        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
+        let mut pagetable = LinearMap::new(1, 1, -(LEVEL_2_BLOCK_SIZE as isize), VaRange::Lower);
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+    }
+
+    #[test]
+    fn map_out_of_range() {
+        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
+
+        // One byte, just past the edge of the valid range.
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
+                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
+                ),
+                Attributes::NORMAL
+            ),
+            Err(MapError::AddressRange(VirtualAddress(
+                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
+            )))
+        );
+
+        // From 0 to just past the valid range.
+        assert_eq!(
+            pagetable.map_range(
+                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
+                Attributes::NORMAL
+            ),
+            Err(MapError::AddressRange(VirtualAddress(
+                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
+            )))
+        );
+    }
+
+    #[test]
+    fn map_invalid_offset() {
+        let mut pagetable = LinearMap::new(1, 1, -4096, VaRange::Lower);
+
+        // One byte, with an offset which would map it to a negative IPA.
+        assert_eq!(
+            pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+            Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
+        );
+    }
+
+    #[test]
+    fn physical_address_in_range_ttbr0() {
+        let translation = LinearTranslation::new(4096);
+        assert_eq!(
+            translation.physical_to_virtual(PhysicalAddress(8192)),
+            NonNull::new(4096 as *mut PageTable).unwrap(),
+        );
+        assert_eq!(
+            translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)),
+            NonNull::new(GIB_512 as *mut PageTable).unwrap(),
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn physical_address_to_zero_ttbr0() {
+        let translation = LinearTranslation::new(4096);
+        translation.physical_to_virtual(PhysicalAddress(4096));
+    }
+
+    #[test]
+    #[should_panic]
+    fn physical_address_out_of_range_ttbr0() {
+        let translation = LinearTranslation::new(4096);
+        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
+    }
+
+    #[test]
+    fn physical_address_in_range_ttbr1() {
+        // Map the 512 GiB region at the top of virtual address space to one page above the bottom
+        // of physical address space.
+        let translation = LinearTranslation::new(GIB_512_S + 4096);
+        assert_eq!(
+            translation.physical_to_virtual(PhysicalAddress(8192)),
+            NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(),
+        );
+        assert_eq!(
+            translation.physical_to_virtual(PhysicalAddress(GIB_512)),
+            NonNull::new(-4096_isize as *mut PageTable).unwrap(),
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn physical_address_to_zero_ttbr1() {
+        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
+        // address space.
+        let translation = LinearTranslation::new(GIB_512_S);
+        translation.physical_to_virtual(PhysicalAddress(GIB_512));
+    }
+
+    #[test]
+    #[should_panic]
+    fn physical_address_out_of_range_ttbr1() {
+        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
+        // address space.
+        let translation = LinearTranslation::new(GIB_512_S);
+        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
+    }
+
+    #[test]
+    fn virtual_address_out_of_range() {
+        let translation = LinearTranslation::new(-4096);
+        let va = VirtualAddress(1024);
+        assert_eq!(
+            translation.virtual_to_physical(va),
+            Err(MapError::InvalidVirtualAddress(va))
+        )
+    }
+
+    #[test]
+    fn virtual_address_range_ttbr1() {
+        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
+        // address space.
+        let translation = LinearTranslation::new(GIB_512_S);
+
+        // The first page in the region covered by TTBR1.
+        assert_eq!(
+            translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)),
+            Ok(PhysicalAddress(0))
+        );
+        // The last page in the region covered by TTBR1.
+        assert_eq!(
+            translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)),
+            Ok(PhysicalAddress(0x7f_ffff_f000))
+        );
+    }
+
+    #[test]
+    fn block_mapping() {
+        // Test that block mapping is used when the PA is appropriately aligned...
+        let mut pagetable = LinearMap::new(1, 1, 1 << 30, VaRange::Lower);
+        pagetable
+            .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+            .unwrap();
+        assert_eq!(
+            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
+            Some(1)
+        );
+
+        // ...but not when it is not.
+        let mut pagetable = LinearMap::new(1, 1, 1 << 29, VaRange::Lower);
+        pagetable
+            .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+            .unwrap();
+        assert_eq!(
+            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
+            Some(2)
+        );
+    }
+}
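The TTBR1 tests above exercise the offset arithmetic that folds the upper virtual address range
down onto low physical addresses. Feeding the same constants into `LinearMap` itself, a high-half
mapping for use with `VaRange::Upper` might be set up as follows (the 2 MiB region size is an
arbitrary example):

```rust
use aarch64_paging::{
    linearmap::LinearMap,
    paging::{Attributes, MemoryRegion, VaRange},
};

// An offset of +512 GiB maps the level 1 TTBR1 region, which starts at 0xffff_ff80_0000_0000,
// down onto physical address 0.
const OFFSET: isize = 512 * 1024 * 1024 * 1024;
const HIGH_BASE: usize = 0xffff_ff80_0000_0000;

let mut kernel_map = LinearMap::new(1, 1, OFFSET, VaRange::Upper);

// Map the first 2 MiB of the upper range, i.e. physical addresses 0x0..0x20_0000.
kernel_map
    .map_range(
        &MemoryRegion::new(HIGH_BASE, HIGH_BASE + 0x20_0000),
        Attributes::NORMAL,
    )
    .unwrap();

// On aarch64, `activate` would then install the table into TTBR1_EL1.
#[cfg(target_arch = "aarch64")]
kernel_map.activate();
```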
diff --git a/src/paging.rs b/src/paging.rs
index d463bef..7606d80 100644
--- a/src/paging.rs
+++ b/src/paging.rs
@@ -5,10 +5,10 @@
 //! Generic aarch64 page table manipulation functionality which doesn't assume anything about how
 //! addresses are mapped.
 
-use crate::AddressRangeError;
-use alloc::alloc::{alloc_zeroed, handle_alloc_error};
+use crate::MapError;
+#[cfg(feature = "alloc")]
+use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};
 use bitflags::bitflags;
-use core::alloc::Layout;
 use core::fmt::{self, Debug, Display, Formatter};
 use core::marker::PhantomData;
 use core::ops::{Add, Range, Sub};
@@ -26,22 +26,21 @@
 /// page size.
 pub const BITS_PER_LEVEL: usize = PAGE_SHIFT - 3;
 
+/// Which virtual address range a page table is for, i.e. which TTBR register to use for it.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum VaRange {
+    /// The page table covers the bottom of the virtual address space (starting at address 0), so
+    /// will be used with `TTBR0`.
+    Lower,
+    /// The page table covers the top of the virtual address space (ending at address
+    /// 0xffff_ffff_ffff_ffff), so will be used with `TTBR1`.
+    Upper,
+}
+
 /// An aarch64 virtual address, the input type of a stage 1 page table.
 #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
 pub struct VirtualAddress(pub usize);
 
-impl<T> From<*const T> for VirtualAddress {
-    fn from(pointer: *const T) -> Self {
-        Self(pointer as usize)
-    }
-}
-
-impl<T> From<*mut T> for VirtualAddress {
-    fn from(pointer: *mut T) -> Self {
-        Self(pointer as usize)
-    }
-}
-
 impl Display for VirtualAddress {
     fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
         write!(f, "{:#018x}", self.0)
@@ -70,6 +69,14 @@
     }
 }
 
+impl Sub<usize> for VirtualAddress {
+    type Output = Self;
+
+    fn sub(self, other: usize) -> Self {
+        Self(self.0 - other)
+    }
+}
+
 /// A range of virtual addresses which may be mapped in a page table.
 #[derive(Clone, Eq, PartialEq)]
 pub struct MemoryRegion(Range<VirtualAddress>);
@@ -107,6 +114,14 @@
     }
 }
 
+impl Sub<usize> for PhysicalAddress {
+    type Output = Self;
+
+    fn sub(self, other: usize) -> Self {
+        Self(self.0 - other)
+    }
+}
+
 /// Returns the size in bytes of the address space covered by a single entry in the page table at
 /// the given level.
 fn granularity_at_level(level: usize) -> usize {
@@ -117,8 +132,20 @@
 /// physical addresses used in the page tables can be converted into virtual addresses that can be
 /// used to access their contents from the code.
 pub trait Translation {
-    fn virtual_to_physical(va: VirtualAddress) -> PhysicalAddress;
-    fn physical_to_virtual(pa: PhysicalAddress) -> VirtualAddress;
+    /// Allocates a zeroed page, which is already mapped, to be used for a new subtable of some
+    /// pagetable. Returns both a pointer to the page and its physical address.
+    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress);
+
+    /// Deallocates the page which was previously allocated by [`allocate_table`](Self::allocate_table).
+    ///
+    /// # Safety
+    ///
+    /// The memory must have been allocated by `allocate_table` on the same `Translation`, and not
+    /// yet deallocated.
+    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>);
+
+    /// Given the physical address of a subtable, returns the virtual address at which it is mapped.
+    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable>;
 }
 
 impl MemoryRegion {
@@ -172,9 +199,11 @@
 }
 
 /// A complete hierarchy of page tables including all levels.
-#[derive(Debug)]
 pub struct RootTable<T: Translation> {
     table: PageTableWithLevel<T>,
+    translation: T,
+    pa: PhysicalAddress,
+    va_range: VaRange,
 }
 
 impl<T: Translation> RootTable<T> {
@@ -183,12 +212,16 @@
     /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not
     /// currently supported by this library. The value of `TCR_EL1.T0SZ` must be set appropriately
     /// to match.
-    pub fn new(level: usize) -> Self {
+    pub fn new(translation: T, level: usize, va_range: VaRange) -> Self {
         if level > LEAF_LEVEL {
             panic!("Invalid root table level {}.", level);
         }
+        let (table, pa) = PageTableWithLevel::new(&translation, level);
         RootTable {
-            table: PageTableWithLevel::new(level),
+            table,
+            translation,
+            pa,
+            va_range,
         }
     }
 
@@ -200,29 +233,81 @@
         granularity_at_level(self.table.level) << BITS_PER_LEVEL
     }
 
-    /// Recursively maps a range into the pagetable hierarchy starting at the root level.
+    /// Recursively maps a range into the pagetable hierarchy starting at the root level, mapping
+    /// the pages to the corresponding physical address range starting at `pa`.
+    ///
+    /// Returns an error if the virtual address range is out of the range covered by the pagetable.
     pub fn map_range(
         &mut self,
         range: &MemoryRegion,
+        pa: PhysicalAddress,
         flags: Attributes,
-    ) -> Result<(), AddressRangeError> {
-        if range.end().0 > self.size() {
-            return Err(AddressRangeError);
+    ) -> Result<(), MapError> {
+        if range.end() < range.start() {
+            return Err(MapError::RegionBackwards(range.clone()));
+        }
+        match self.va_range {
+            VaRange::Lower => {
+                if (range.start().0 as isize) < 0 {
+                    return Err(MapError::AddressRange(range.start()));
+                } else if range.end().0 > self.size() {
+                    return Err(MapError::AddressRange(range.end()));
+                }
+            }
+            VaRange::Upper => {
+                if range.start().0 as isize >= 0
+                    || (range.start().0 as isize).unsigned_abs() > self.size()
+                {
+                    return Err(MapError::AddressRange(range.start()));
+                }
+            }
         }
 
-        self.table.map_range(range, flags);
+        self.table.map_range(&self.translation, range, pa, flags);
+
         Ok(())
     }
 
     /// Returns the physical address of the root table in memory.
     pub fn to_physical(&self) -> PhysicalAddress {
-        self.table.to_physical()
+        self.pa
+    }
+
+    /// Returns the TTBR for which this table is intended.
+    pub fn va_range(&self) -> VaRange {
+        self.va_range
+    }
+
+    /// Returns a reference to the translation used for this page table.
+    pub fn translation(&self) -> &T {
+        &self.translation
+    }
+
+    /// Returns the level of mapping used for the given virtual address:
+    /// - `None` if it is unmapped
+    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
+    /// - `Some(level)` if it is mapped as a block at `level`
+    #[cfg(test)]
+    pub(crate) fn mapping_level(&self, va: VirtualAddress) -> Option<usize> {
+        self.table.mapping_level(&self.translation, va)
+    }
+}
+
+impl<T: Translation> Debug for RootTable<T> {
+    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
+        writeln!(
+            f,
+            "RootTable {{ pa: {}, level: {}, table:",
+            self.pa, self.table.level
+        )?;
+        self.table.fmt_indented(f, &self.translation, 0)?;
+        write!(f, "}}")
     }
 }
 
 impl<T: Translation> Drop for RootTable<T> {
     fn drop(&mut self) {
-        self.table.free()
+        self.table.free(&self.translation)
     }
 }
 
@@ -289,28 +374,45 @@
 /// Smart pointer which owns a [`PageTable`] and knows what level it is at. This allows it to
 /// implement `Debug` and `Drop`, as walking the page table hierachy requires knowing the starting
 /// level.
+#[derive(Debug)]
 struct PageTableWithLevel<T: Translation> {
     table: NonNull<PageTable>,
     level: usize,
-    _phantom_data: PhantomData<T>,
+    _translation: PhantomData<T>,
 }
 
 impl<T: Translation> PageTableWithLevel<T> {
-    /// Allocates a new, zeroed, appropriately-aligned page table on the heap.
-    fn new(level: usize) -> Self {
+    /// Allocates a new, zeroed, appropriately-aligned page table with the given translation,
+    /// returning both a pointer to it and its physical address.
+    fn new(translation: &T, level: usize) -> (Self, PhysicalAddress) {
         assert!(level <= LEAF_LEVEL);
+        let (table, pa) = translation.allocate_table();
+        (
+            // Safe because the pointer has been allocated with the appropriate layout, and the
+            // memory is zeroed which is valid initialisation for a PageTable.
+            Self::from_pointer(table, level),
+            pa,
+        )
+    }
+
+    fn from_pointer(table: NonNull<PageTable>, level: usize) -> Self {
         Self {
-            // Safe because the pointer has been allocated with the appropriate layout by the global
-            // allocator, and the memory is zeroed which is valid initialisation for a PageTable.
-            table: unsafe { allocate_zeroed() },
+            table,
             level,
-            _phantom_data: PhantomData,
+            _translation: PhantomData::default(),
         }
     }
 
-    /// Returns the physical address of this page table in memory.
-    fn to_physical(&self) -> PhysicalAddress {
-        T::virtual_to_physical(VirtualAddress::from(self.table.as_ptr()))
+    /// Returns a reference to the descriptor corresponding to a given virtual address.
+    #[cfg(test)]
+    fn get_entry(&self, va: VirtualAddress) -> &Descriptor {
+        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
+        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
+        // Safe because we know that the pointer is properly aligned, initialised and
+        // dereferenceable, and nothing else can mutate the page table while we hold a reference to
+        // the PageTableWithLevel (assuming it is not currently active).
+        let table = unsafe { self.table.as_ref() };
+        &table.entries[index]
     }
 
     /// Returns a mutable reference to the descriptor corresponding to a given virtual address.
@@ -324,9 +426,23 @@
         &mut table.entries[index]
     }
 
-    fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) {
-        let mut pa = T::virtual_to_physical(range.start());
+    /// Maps the given virtual address range in this pagetable to the corresponding physical
+    /// address range starting at the given `pa`, recursing into any subtables as necessary.
+    ///
+    /// Assumes that the entire range is within the range covered by this pagetable.
+    ///
+    /// Panics if the `translation` doesn't provide a corresponding physical address for some
+    /// virtual address within the range, as there is no way to roll back to a safe state so this
+    /// should be checked by the caller beforehand.
+    fn map_range(
+        &mut self,
+        translation: &T,
+        range: &MemoryRegion,
+        mut pa: PhysicalAddress,
+        flags: Attributes,
+    ) {
         let level = self.level;
+        let granularity = granularity_at_level(level);
 
         for chunk in range.split(level) {
             let entry = self.get_entry_mut(chunk.0.start);
@@ -334,35 +450,47 @@
             if level == LEAF_LEVEL {
                 // Put down a page mapping.
                 entry.set(pa, flags | Attributes::ACCESSED | Attributes::TABLE_OR_PAGE);
-            } else if chunk.is_block(level) && !entry.is_table_or_page() {
+            } else if chunk.is_block(level)
+                && !entry.is_table_or_page()
+                && is_aligned(pa.0, granularity)
+            {
                 // Rather than leak the entire subhierarchy, only put down
                 // a block mapping if the region is not already covered by
                 // a table mapping.
                 entry.set(pa, flags | Attributes::ACCESSED);
             } else {
-                let mut subtable = if let Some(subtable) = entry.subtable::<T>(level) {
+                let mut subtable = if let Some(subtable) = entry.subtable(translation, level) {
                     subtable
                 } else {
                     let old = *entry;
-                    let mut subtable = Self::new(level + 1);
-                    if let Some(old_flags) = old.flags() {
-                        let granularity = granularity_at_level(level);
+                    let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
+                    if let (Some(old_flags), Some(old_pa)) = (old.flags(), old.output_address()) {
                         // Old was a valid block entry, so we need to split it.
                         // Recreate the entire block in the newly added table.
                         let a = align_down(chunk.0.start.0, granularity);
                         let b = align_up(chunk.0.end.0, granularity);
-                        subtable.map_range(&MemoryRegion::new(a, b), old_flags);
+                        subtable.map_range(
+                            translation,
+                            &MemoryRegion::new(a, b),
+                            old_pa,
+                            old_flags,
+                        );
                     }
-                    entry.set(subtable.to_physical(), Attributes::TABLE_OR_PAGE);
+                    entry.set(subtable_pa, Attributes::TABLE_OR_PAGE);
                     subtable
                 };
-                subtable.map_range(&chunk, flags);
+                subtable.map_range(translation, &chunk, pa, flags);
             }
             pa.0 += chunk.len();
         }
     }
 
-    fn fmt_indented(&self, f: &mut Formatter, indentation: usize) -> Result<(), fmt::Error> {
+    fn fmt_indented(
+        &self,
+        f: &mut Formatter,
+        translation: &T,
+        indentation: usize,
+    ) -> Result<(), fmt::Error> {
         // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
         // PageTable won't be mutated while we are using it.
         let table = unsafe { self.table.as_ref() };
@@ -381,8 +509,8 @@
                 }
             } else {
                 writeln!(f, "{:indentation$}{}: {:?}", "", i, table.entries[i])?;
-                if let Some(subtable) = table.entries[i].subtable::<T>(self.level) {
-                    subtable.fmt_indented(f, indentation + 2)?;
+                if let Some(subtable) = table.entries[i].subtable(translation, self.level) {
+                    subtable.fmt_indented(f, translation, indentation + 2)?;
                 }
                 i += 1;
             }
@@ -392,34 +520,61 @@
 
     /// Frees the memory used by this pagetable and all subtables. It is not valid to access the
     /// page table after this.
-    fn free(&mut self) {
+    fn free(&mut self, translation: &T) {
         // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
         // PageTable won't be mutated while we are freeing it.
         let table = unsafe { self.table.as_ref() };
         for entry in table.entries {
-            if let Some(mut subtable) = entry.subtable::<T>(self.level) {
-                // Safe because the subtable was allocated by `PageTable::new` with the global
-                // allocator and appropriate layout.
-                subtable.free();
+            if let Some(mut subtable) = entry.subtable(translation, self.level) {
+                // The subtable was allocated by `PageTableWithLevel::new` with this same
+                // `translation`, so it is freed with the same `translation` here.
+                subtable.free(translation);
+            }
+        }
+        // Safe because the table was allocated by `allocate_table` on this same `translation`
+        // (via `PageTableWithLevel::new`), and has not yet been deallocated.
+        unsafe {
+            // Actually free the memory used by the `PageTable`.
+            translation.deallocate_table(self.table);
+        }
+    }
+
+    /// Returns the level of mapping used for the given virtual address:
+    /// - `None` if it is unmapped
+    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
+    /// - `Some(level)` if it is mapped as a block at `level`
+    #[cfg(test)]
+    fn mapping_level(&self, translation: &T, va: VirtualAddress) -> Option<usize> {
+        let entry = self.get_entry(va);
+        if let Some(subtable) = entry.subtable(translation, self.level) {
+            subtable.mapping_level(translation, va)
+        } else {
+            if entry.is_valid() {
+                Some(self.level)
+            } else {
+                None
             }
         }
     }
 }
 
-impl<T: Translation> Debug for PageTableWithLevel<T> {
-    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
-        writeln!(f, "PageTableWithLevel {{ level: {}, table:", self.level)?;
-        self.fmt_indented(f, 0)?;
-        write!(f, "}}")
-    }
-}
-
 /// A single level of a page table.
 #[repr(C, align(4096))]
-struct PageTable {
+pub struct PageTable {
     entries: [Descriptor; 1 << BITS_PER_LEVEL],
 }
 
+impl PageTable {
+    /// Allocates a new zeroed, appropriately-aligned pagetable on the heap using the global
+    /// allocator and returns a pointer to it.
+    #[cfg(feature = "alloc")]
+    pub fn new() -> NonNull<Self> {
+        // Safe because the pointer has been allocated with the appropriate layout by the global
+        // allocator, and the memory is zeroed which is valid initialisation for a PageTable.
+        unsafe { allocate_zeroed() }
+    }
+}
+
 /// An entry in a page table.
 ///
 /// A descriptor may be:
@@ -466,16 +621,15 @@
         self.0 = pa.0 | (flags | Attributes::VALID).bits();
     }
 
-    fn subtable<T: Translation>(&self, level: usize) -> Option<PageTableWithLevel<T>> {
+    fn subtable<T: Translation>(
+        &self,
+        translation: &T,
+        level: usize,
+    ) -> Option<PageTableWithLevel<T>> {
         if level < LEAF_LEVEL && self.is_table_or_page() {
             if let Some(output_address) = self.output_address() {
-                let va = T::physical_to_virtual(output_address);
-                let ptr = va.0 as *mut PageTable;
-                return Some(PageTableWithLevel {
-                    level: level + 1,
-                    table: NonNull::new(ptr).expect("Subtable pointer must be non-null."),
-                    _phantom_data: PhantomData,
-                });
+                let table = translation.physical_to_virtual(output_address);
+                return Some(PageTableWithLevel::from_pointer(table, level + 1));
             }
         }
         None
@@ -497,6 +651,7 @@
 /// # Safety
 ///
 /// It must be valid to initialise the type `T` by simply zeroing its memory.
+#[cfg(feature = "alloc")]
 unsafe fn allocate_zeroed<T>() -> NonNull<T> {
     let layout = Layout::new::<T>();
     // Safe because we know the layout has non-zero size.
@@ -508,6 +663,18 @@
     NonNull::new_unchecked(pointer as *mut T)
 }
 
+/// Deallocates the heap space for a `T` which was previously allocated by `allocate_zeroed`.
+///
+/// # Safety
+///
+/// The memory must have been allocated by the global allocator, with the layout for `T`, and not
+/// yet deallocated.
+#[cfg(feature = "alloc")]
+pub(crate) unsafe fn deallocate<T>(ptr: NonNull<T>) {
+    let layout = Layout::new::<T>();
+    dealloc(ptr.as_ptr() as *mut u8, layout);
+}
+
 const fn align_down(value: usize, alignment: usize) -> usize {
     value & !(alignment - 1)
 }
@@ -516,11 +683,17 @@
     ((value - 1) | (alignment - 1)) + 1
 }
 
+pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool {
+    value & (alignment - 1) == 0
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    #[cfg(feature = "alloc")]
     use alloc::{format, string::ToString};
 
+    #[cfg(feature = "alloc")]
     #[test]
     fn display_memory_region() {
         let region = MemoryRegion::new(0x1234, 0x56789);
@@ -541,6 +714,7 @@
         assert_eq!(high - low, 0x1222);
     }
 
+    #[cfg(debug_assertions)]
     #[test]
     #[should_panic]
     fn subtract_virtual_address_overflow() {
@@ -563,6 +737,7 @@
         assert_eq!(high - low, 0x1222);
     }
 
+    #[cfg(debug_assertions)]
     #[test]
     #[should_panic]
     fn subtract_physical_address_overflow() {