swap: add helpers for pagesize
Using the cached page size shift has several performance benefits, which
are listed in the module-level comment of the new pagesize.rs file.
BUG=b:260543132
TEST=cargo test -p swap
Change-Id: Ic7a19135b7a2e29c032c73fa7dca1d853cdb5e48
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4125177
Reviewed-by: David Stevens <stevensd@chromium.org>
Reviewed-by: Dennis Kempin <denniskempin@google.com>
Commit-Queue: Shin Kawamura <kawasin@google.com>
diff --git a/Cargo.lock b/Cargo.lock
index 24c93c1..495bd10 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2009,6 +2009,7 @@
"data_model",
"libc",
"minijail",
+ "once_cell",
"remain",
"serde",
"serde_json",
diff --git a/swap/Cargo.toml b/swap/Cargo.toml
index 63c712b..7962ed2 100644
--- a/swap/Cargo.toml
+++ b/swap/Cargo.toml
@@ -14,6 +14,7 @@
data_model = "*"
libc = "*"
minijail = "*"
+once_cell = "*"
remain = "*"
serde = { version = "1", features = [ "derive" ] }
serde_json = "*"
diff --git a/swap/src/file.rs b/swap/src/file.rs
index baedf5b..2b96dbd 100644
--- a/swap/src/file.rs
+++ b/swap/src/file.rs
@@ -11,7 +11,6 @@
use std::path::Path;
use base::error;
-use base::pagesize;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
@@ -21,6 +20,10 @@
use data_model::VolatileSlice;
use thiserror::Error as ThisError;
+use crate::pagesize::bytes_to_pages;
+use crate::pagesize::is_page_aligned;
+use crate::pagesize::pages_to_bytes;
+
pub type Result<T> = std::result::Result<T, Error>;
#[derive(ThisError, Debug)]
@@ -92,7 +95,7 @@
.custom_flags(libc::O_TMPFILE | libc::O_EXCL)
.mode(0o000) // other processes with the same uid can't open the file
.open(dir_path)?;
- let file_mmap = MemoryMappingBuilder::new(num_of_pages * pagesize())
+ let file_mmap = MemoryMappingBuilder::new(pages_to_bytes(num_of_pages))
.from_file(&file)
.protection(Protection::read())
.build()?;
@@ -121,7 +124,9 @@
match self.state_list.get(idx) {
Some(is_present) => {
if *is_present {
- let slice = self.file_mmap.get_slice(idx * pagesize(), pagesize())?;
+ let slice = self
+ .file_mmap
+ .get_slice(pages_to_bytes(idx), pages_to_bytes(1))?;
Ok(Some(slice))
} else {
Ok(None)
@@ -159,16 +164,16 @@
/// the pagesize.
pub fn write_to_file(&mut self, idx: usize, mem_slice: &[u8]) -> Result<()> {
// validate
- if mem_slice.len() % pagesize() != 0 {
+ if !is_page_aligned(mem_slice.len()) {
// mem_slice size must align with page size.
return Err(Error::InvalidSize);
}
- let num_pages = mem_slice.len() / pagesize();
+ let num_pages = bytes_to_pages(mem_slice.len());
if idx + num_pages > self.state_list.len() {
return Err(Error::OutOfRange);
}
- let byte_offset = (idx * pagesize()) as u64;
+ let byte_offset = (pages_to_bytes(idx)) as u64;
self.file.write_all_at(mem_slice, byte_offset)?;
for i in idx..(idx + num_pages) {
self.state_list[i] = true;
@@ -217,12 +222,12 @@
idx += 1;
}
self.idx = idx;
- let num_of_page = idx - head_idx;
+ let num_of_pages = idx - head_idx;
// The offset and count must be correct and never cause [VolatileMemoryError].
let slice = self
.swap_file
.file_mmap
- .get_slice(head_idx * pagesize(), num_of_page * pagesize())
+ .get_slice(pages_to_bytes(head_idx), pages_to_bytes(num_of_pages))
.unwrap();
Some(Pages {
base_idx: head_idx,
@@ -236,6 +241,8 @@
use std::path::PathBuf;
use std::slice;
+ use base::pagesize;
+
use super::*;
#[test]
diff --git a/swap/src/lib.rs b/swap/src/lib.rs
index ba79246..d72100e 100644
--- a/swap/src/lib.rs
+++ b/swap/src/lib.rs
@@ -8,6 +8,7 @@
mod file;
mod logger;
+mod pagesize;
// this is public only for integration tests.
pub mod page_handler;
mod processes;
diff --git a/swap/src/page_handler.rs b/swap/src/page_handler.rs
index 327db0b..0c6710a 100644
--- a/swap/src/page_handler.rs
+++ b/swap/src/page_handler.rs
@@ -10,7 +10,6 @@
use std::path::Path;
use base::error;
-use base::pagesize;
use base::unix::FileDataIterator;
use base::AsRawDescriptor;
use data_model::VolatileSlice;
@@ -18,6 +17,12 @@
use crate::file::Error as FileError;
use crate::file::SwapFile;
+use crate::pagesize::addr_to_page_idx;
+use crate::pagesize::bytes_to_pages;
+use crate::pagesize::is_page_aligned;
+use crate::pagesize::page_base_addr;
+use crate::pagesize::page_idx_to_addr;
+use crate::pagesize::pages_to_bytes;
use crate::userfaultfd::UffdError;
use crate::userfaultfd::Userfaultfd;
@@ -71,7 +76,6 @@
/// All the addresses and sizes in bytes are converted to page id internally.
pub struct PageHandler {
regions: Vec<Region>,
- pagesize_shift: u32,
}
impl PageHandler {
@@ -83,16 +87,8 @@
/// * `regions` - the list of the region. the start address must align with page. the size must
/// be multiple of pagesize.
pub fn create(swap_dir: &Path, regions: &[Range<usize>]) -> Result<Self> {
- let pagesize_shift = pagesize().trailing_zeros();
- // pagesize() should be power of 2 in almost all cases. vmm-swap feature does not support
- // systems in which page size is not power of 2.
- if 1 << pagesize_shift != pagesize() {
- panic!("page size is not power of 2");
- }
-
let mut handler = Self {
regions: Vec::new(),
- pagesize_shift,
};
for address_range in regions {
@@ -102,26 +98,6 @@
Ok(handler)
}
- /// The page index of the page which contains the "addr".
- fn addr_to_page_idx(&self, addr: usize) -> usize {
- addr >> self.pagesize_shift
- }
-
- /// The head address of the page.
- fn page_idx_to_addr(&self, page_idx: usize) -> usize {
- page_idx << self.pagesize_shift
- }
-
- /// The head address of the page which contains the "addr".
- fn page_base_addr(&self, addr: usize) -> usize {
- (addr >> self.pagesize_shift) << self.pagesize_shift
- }
-
- fn is_page_aligned(&self, addr: usize) -> bool {
- let mask = (1 << self.pagesize_shift) - 1;
- addr & mask == 0
- }
-
fn find_region_position(&self, page_idx: usize) -> Option<usize> {
// sequential search the corresponding page map from the list. It should be fast enough
// because there are a few regions (usually only 1).
@@ -146,9 +122,9 @@
/// * `address_range` - the range of the region. the start address must align with page. the
/// size must be multiple of pagesize.
fn add_region(&mut self, swap_dir: &Path, address_range: &Range<usize>) -> Result<()> {
- let head_page_idx = self.addr_to_page_idx(address_range.start);
+ let head_page_idx = addr_to_page_idx(address_range.start);
let region_size = address_range.end - address_range.start;
- let num_of_pages = region_size >> self.pagesize_shift;
+ let num_of_pages = bytes_to_pages(region_size);
// find an overlaping region
match self.regions.iter().position(|region| {
@@ -163,14 +139,14 @@
Err(Error::RegionOverlap(
address_range.clone(),
- self.page_idx_to_addr(region.head_page_idx)
- ..(self.page_idx_to_addr(region.head_page_idx + region.file.num_pages())),
+ page_idx_to_addr(region.head_page_idx)
+ ..(page_idx_to_addr(region.head_page_idx + region.file.num_pages())),
))
}
None => {
let base_addr = address_range.start;
- assert!(self.is_page_aligned(base_addr));
- assert!(self.is_page_aligned(region_size));
+ assert!(is_page_aligned(base_addr));
+ assert!(is_page_aligned(region_size));
let file = SwapFile::new(swap_dir, num_of_pages)?;
self.regions.push(Region {
@@ -217,10 +193,10 @@
/// * `uffd` - the reference to the [Userfaultfd] for the faulting process.
/// * `address` - the address that triggered the page fault.
pub fn handle_page_fault(&mut self, uffd: &Userfaultfd, address: usize) -> Result<()> {
- let page_idx = self.addr_to_page_idx(address);
+ let page_idx = addr_to_page_idx(address);
// the head address of the page.
- let page_addr = self.page_base_addr(address);
- let page_size = 1 << self.pagesize_shift;
+ let page_addr = page_base_addr(address);
+ let page_size = pages_to_bytes(1);
let Region {
head_page_idx,
file,
@@ -276,15 +252,15 @@
/// head address of the next memory area of the freed area. (i.e. the exact tail address of
/// the memory area is `end_addr - 1`.)
pub fn handle_page_remove(&mut self, start_addr: usize, end_addr: usize) -> Result<()> {
- if !self.is_page_aligned(start_addr) {
+ if !is_page_aligned(start_addr) {
return Err(Error::InvalidAddress(start_addr));
- } else if !self.is_page_aligned(end_addr) {
+ } else if !is_page_aligned(end_addr) {
return Err(Error::InvalidAddress(end_addr));
}
- let start_page_idx = self.addr_to_page_idx(start_addr);
- let last_page_idx = self.addr_to_page_idx(end_addr);
+ let start_page_idx = addr_to_page_idx(start_addr);
+ let last_page_idx = addr_to_page_idx(end_addr);
for page_idx in start_page_idx..(last_page_idx) {
- let page_addr = self.page_idx_to_addr(page_idx);
+ let page_addr = page_idx_to_addr(page_idx);
let region = self
.find_region(page_idx)
.ok_or(Error::InvalidAddress(page_addr))?;
@@ -327,7 +303,7 @@
where
T: AsRawDescriptor,
{
- let head_page_idx = self.addr_to_page_idx(base_addr);
+ let head_page_idx = addr_to_page_idx(base_addr);
// use find_region_position instead of find_region() due to borrow checker.
let region_position = self
.find_region_position(head_page_idx)
@@ -335,18 +311,18 @@
if self.regions[region_position].head_page_idx != head_page_idx {
return Err(Error::InvalidAddress(base_addr));
}
- let region_size = self.regions[region_position].file.num_pages() << self.pagesize_shift;
+ let region_size = pages_to_bytes(self.regions[region_position].file.num_pages());
let file_data = FileDataIterator::new(memfd, base_offset, region_size as u64);
let mut swapped_size = 0;
for data_range in file_data {
// assert offset is page aligned
let offset = (data_range.start - base_offset) as usize;
- assert!(self.is_page_aligned(offset));
+ assert!(is_page_aligned(offset));
let addr = base_addr + offset;
- let page_idx = self.addr_to_page_idx(addr);
+ let page_idx = addr_to_page_idx(addr);
let size = (data_range.end - data_range.start) as usize;
- assert!(self.is_page_aligned(size));
+ assert!(is_page_aligned(size));
// safe because the page is within the range of the guest memory.
let mem_slice = unsafe { std::slice::from_raw_parts(addr as *const u8, size) };
self.regions[region_position]
@@ -367,7 +343,7 @@
libc::MADV_REMOVE,
);
}
- let swapped_pages = swapped_size >> self.pagesize_shift;
+ let swapped_pages = bytes_to_pages(swapped_size);
let mut region = &mut self.regions[region_position];
// Suppress error log on the first swap_out, since page counts are not initialized but
// zero.
@@ -397,13 +373,13 @@
for region in self.regions.iter() {
for pages in region.file.all_present_pages() {
let page_idx = region.head_page_idx + pages.base_idx;
- let page_addr = self.page_idx_to_addr(page_idx);
+ let page_addr = page_idx_to_addr(page_idx);
let size = pages.content.size();
Self::copy_all(uffd, page_addr, pages.content, false)?;
swapped_size += size;
}
}
- Ok(swapped_size >> self.pagesize_shift)
+ Ok(bytes_to_pages(swapped_size))
}
/// Returns count of pages active on the memory.
@@ -437,6 +413,6 @@
swapped_size += pages.content.size();
}
}
- swapped_size >> self.pagesize_shift
+ bytes_to_pages(swapped_size)
}
}
diff --git a/swap/src/pagesize.rs b/swap/src/pagesize.rs
new file mode 100644
index 0000000..966b1ff
--- /dev/null
+++ b/swap/src/pagesize.rs
@@ -0,0 +1,120 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Helpers to calculate values derived from page size.
+//!
+//! This has performance benefits from:
+//!
+//! * Avoiding calling `sysconf(_SC_PAGESIZE)` multiple times by caching the shift bit.
+//! * Using the (faster) shift instruction instead of (slower) multiply/divide instruction.
+
+use base::pagesize;
+use once_cell::sync::Lazy;
+
+static PAGESIZE_SHIFT: Lazy<u8> = Lazy::new(|| {
+ let pagesize_shift = pagesize().trailing_zeros();
+    // pagesize() should be a power of 2 in almost all cases. The vmm-swap feature does not
+    // support systems in which the page size is not a power of 2.
+ if 1 << pagesize_shift != pagesize() {
+ panic!("page size is not power of 2");
+ }
+ // pagesize_shift must be less than 64 since usize has at most 64 bits.
+ pagesize_shift as u8
+});
+
+/// Wrapper for the cached page size shift.
+///
+/// NOTE(review): this struct appears to be unused — all the helpers below are
+/// free functions that read the `PAGESIZE_SHIFT` static directly, and nothing
+/// in this change constructs a `PagesizeShift`. Consider removing it (it will
+/// likely trigger a `dead_code` warning); confirm no follow-up CL depends on it.
+#[derive(Clone, Copy, Debug)]
+pub struct PagesizeShift(u8);
+
+/// The page index of the page which contains the "addr".
+#[inline]
+pub fn addr_to_page_idx(addr: usize) -> usize {
+ addr >> *PAGESIZE_SHIFT
+}
+
+/// The head address of the page.
+#[inline]
+pub fn page_idx_to_addr(page_idx: usize) -> usize {
+ page_idx << *PAGESIZE_SHIFT
+}
+
+/// The head address of the page which contains the "addr".
+#[inline]
+pub fn page_base_addr(addr: usize) -> usize {
+ let pagesize_shift = *PAGESIZE_SHIFT;
+ (addr >> pagesize_shift) << pagesize_shift
+}
+
+/// Whether the address is aligned with page.
+#[inline]
+pub fn is_page_aligned(addr: usize) -> bool {
+ let mask = (1 << *PAGESIZE_SHIFT) - 1;
+ addr & mask == 0
+}
+
+/// Convert the bytes to number of pages.
+///
+/// This rounds down if the `size_in_bytes` is not multiple of page size.
+#[inline]
+pub fn bytes_to_pages(size_in_bytes: usize) -> usize {
+ size_in_bytes >> *PAGESIZE_SHIFT
+}
+
+/// Convert number of pages to byte size.
+#[inline]
+pub fn pages_to_bytes(num_of_pages: usize) -> usize {
+ num_of_pages << *PAGESIZE_SHIFT
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::*;
+
+ #[test]
+ fn test_addr_to_page_idx() {
+ let addr = 10 * pagesize();
+ assert_eq!(addr_to_page_idx(addr - 1), 9);
+ assert_eq!(addr_to_page_idx(addr), 10);
+ assert_eq!(addr_to_page_idx(addr + 1), 10);
+ }
+
+ #[test]
+ fn test_page_idx_to_addr() {
+ assert_eq!(page_idx_to_addr(10), 10 * pagesize());
+ }
+
+ #[test]
+ fn test_page_base_addr() {
+ let addr = 10 * pagesize();
+ assert_eq!(page_base_addr(addr - 1), addr - pagesize());
+ assert_eq!(page_base_addr(addr), addr);
+ assert_eq!(page_base_addr(addr + 1), addr);
+ }
+
+ #[test]
+ fn test_is_page_aligned() {
+ let addr = 10 * pagesize();
+ assert!(!is_page_aligned(addr - 1));
+ assert!(is_page_aligned(addr));
+ assert!(!is_page_aligned(addr + 1));
+ }
+
+ #[test]
+ fn test_bytes_to_pages() {
+ assert_eq!(bytes_to_pages(10 * pagesize()), 10);
+ assert_eq!(bytes_to_pages(10 * pagesize() + 1), 10);
+ }
+
+ #[test]
+ fn test_pages_to_bytes() {
+ assert_eq!(pages_to_bytes(1), pagesize());
+ assert_eq!(pages_to_bytes(10), 10 * pagesize());
+ }
+}