Upgrade vm-memory to version 0.16.0
Bug: 381144621
Test: crate_tool regenerate vm-memory
Change-Id: I6b511a0ef16b9480c47c122f999002225bfefb66
diff --git a/crates/vm-memory/.android-checksum.json b/crates/vm-memory/.android-checksum.json
index 94030e9..29a4ffa 100644
--- a/crates/vm-memory/.android-checksum.json
+++ b/crates/vm-memory/.android-checksum.json
@@ -1 +1 @@
-{"package":null,"files":{".cargo-checksum.json":"639cd67924b921b8b681482bd96838fbb078fa93f4099f6d66d021b491d6bc45","Android.bp":"a29f30852bedb1aa03c07e85415f6e854c37058d83ad8233b4174d8dfc8a2cd2","CHANGELOG.md":"6c0aff1b96507e88de6191fddc4dfd1df6b0941b70dde8c7a3f401ff0e66a404","CODEOWNERS":"b80872e1aad5883a92ced4812545b49f6bcb2d473655c1698e32eda7f5a4848d","Cargo.toml":"0be73899a5956f16edda3f5ee4311e77fa8eba309d2695251efbd2a6e34a5d4e","DESIGN.md":"41889d956bceb38d7ac1e374d7359b84b2d9059ccfda2ca33fb81f3d6f117a20","LICENSE":"7c6512d88b3127990067585f24881ba1f182c5c49a04cb1975b226b7be95709e","LICENSE-APACHE":"7c6512d88b3127990067585f24881ba1f182c5c49a04cb1975b226b7be95709e","LICENSE-BSD-3-Clause":"6effa22075f387e9d0cc10ae8787537cbd3708d2f65d5dd618a3531c876d0c9b","METADATA":"dfa8b9d50b10db5d384ede0e65efdd9bd0bf7195f974f295aab2da3804fef226","MODULE_LICENSE_APACHE2":"0d6f8afa3940b7f06bebee651376d43bc8b0d5b437337be2696d30377451e93a","README.md":"c56ae3b9a4d090e4dd5c37046383e136202d40617f66ad939dd39fd7bdbf5334","TODO.md":"0bb26e4352358a5ec40fb12538fa3adec446528483ef512726025541ea3b5649","benches/guest_memory.rs":"11e14113506e9f1891f86d309721f32b09d3ecde142b0af69d8e2b52d1fe0293","benches/main.rs":"84c672f9a5029433e6e0d81d2fcded7b7ed3bf7ae68922b0adadce868e628cd8","benches/mmap/mod.rs":"446d0f33e9e86343c6384fc4b8a4e904c02b080ff0036caace328bc74c18728a","benches/volatile.rs":"1c7ade05cc51425d0fc4d85390051d1af48a68397115571c02ac8c94b0ecb0c1","cargo_embargo.json":"01c4ed2bc815ef7e524ce895aac53ec28fcbebd640dd300ebbc22197f924de99","coverage_config_aarch64.json":"0704342dbbd99d078a309818ccf7d7ad266579b8860c6842cffedbe9f6f2d360","coverage_config_x86_64.json":"8a8fa22ae287a64d050ef471dc2995731746be221ba3dd917ffaf3a3c3bc70aa","src/address.rs":"6d888eb5dbae506be5804d658b1ba0bf5d140b27c2d0ee57b93c41b175c8d789","src/atomic.rs":"ef2185e7b75a206ec3289b8c3292334dd649931111d217da8c564f20863593ef","src/atomic_integer.rs":"1f01c95a617b20ff4bb961d53ef062ec285fab1bddb7f1218c22803c8fa05167","src/bitm
ap/backend/atomic_bitmap.rs":"ae5dc7d10713b6a378cbf9b942fefa58b99bdf694608526164d7e5d58265686c","src/bitmap/backend/atomic_bitmap_arc.rs":"74ba74c4f971bb6d0e81813bfbf688b798acf9c28f45aab008c5b62d28337c51","src/bitmap/backend/mod.rs":"359c5b037cd3c4f3eaf0c11dbb7c6a4c2aa894a065c3837be6fa86079e0d0111","src/bitmap/backend/slice.rs":"1adc8f373b555347586f65e3107c6a05804632ccaf61593d8a70a5f8649f1d61","src/bitmap/mod.rs":"5e50042d75c6984b775dab2181b403644b4f4dfdfcf3cecf5024bd8d0115944d","src/bytes.rs":"270dc7c63a61dfa68dc02f0bb51a00f0399ff0cf2c5e09019295cd9faa353ed7","src/endian.rs":"5e630d9be4dda02161d5367abdf7b4289d114b240f31fb13d3018af8b19502b4","src/guest_memory.rs":"87c50e59eb99d0416b8ba503ed87978d67143d8d8b2410ec8a88fb2b66d8bf68","src/lib.rs":"6de220fd76760bf3c0b2656e38346605a0a2749dde787ceb412ebb3549795012","src/mmap.rs":"b8bb1e7d376f6968cd03d1dee8a0f2dc524912b105f5295089f300f7bbacd8d2","src/mmap_unix.rs":"a3624679f889236b1c1ad0ddfdb1653f12a8789f24479905fba6980a0e38a989","src/mmap_windows.rs":"9a465706f8501e150b1f188526f358e38a0cd8784e443746b289d8c029058591","src/mmap_xen.rs":"28c79b1ec258f5f13f0633ab523a90d9babd7773043d3ab5c219403a11b734c7","src/volatile_memory.rs":"673ecb93f949842b95d42ba659780dfb8956506e089341060815da9dcf68b6fd"}}
\ No newline at end of file
+{"package":null,"files":{".cargo-checksum.json":"03917ffe1ce829ec5a808258ab92fdcb50065ac6522e4f1f0237479cd18fceb5","Android.bp":"08707f005d2ba2040de5db3aef3dc085f459d07209d3dd18e0a143a4fcc8fc79","CHANGELOG.md":"e2fc531884ec91e971a2471be32246e45b74777f6bf3835c8a3901020b3ec195","CODEOWNERS":"b80872e1aad5883a92ced4812545b49f6bcb2d473655c1698e32eda7f5a4848d","Cargo.toml":"57845f3e44846d8b39ccf717a0ec9771811502439248226f3aa1d7f3bdf89346","DESIGN.md":"88c3aeec99c4cbabebec4e1ce70cfc482d90a0a3aa184c3fccfa5da675dbc549","LICENSE":"7c6512d88b3127990067585f24881ba1f182c5c49a04cb1975b226b7be95709e","LICENSE-APACHE":"7c6512d88b3127990067585f24881ba1f182c5c49a04cb1975b226b7be95709e","LICENSE-BSD-3-Clause":"6effa22075f387e9d0cc10ae8787537cbd3708d2f65d5dd618a3531c876d0c9b","METADATA":"b405c9b692b3ef4d64fedb3dda1df5ed91771a82589f0aea430ac387c6a2bd8a","MODULE_LICENSE_APACHE2":"0d6f8afa3940b7f06bebee651376d43bc8b0d5b437337be2696d30377451e93a","README.md":"e60865700f8b53093bec9b3d527fd5f70c7296066d28ba2557de852ebd6707e9","TODO.md":"8e17c765b47c3d395d8f4f31994c01ecd1c0fda50f305292901bfcd1488364bc","benches/guest_memory.rs":"11e14113506e9f1891f86d309721f32b09d3ecde142b0af69d8e2b52d1fe0293","benches/main.rs":"84c672f9a5029433e6e0d81d2fcded7b7ed3bf7ae68922b0adadce868e628cd8","benches/mmap/mod.rs":"76255dccc23dc194388a3e21006ee336663d63c770a212f2a28a803f8aaba14a","benches/volatile.rs":"1c7ade05cc51425d0fc4d85390051d1af48a68397115571c02ac8c94b0ecb0c1","cargo2android_module.bp.fragment":"1c632b6fda8f54d33a0ef9fa375661a2ae8e7399227291bb68d44960ea072d44","cargo_embargo.json":"77b4d03bc110c19d7faae8fc1a845c60ee8a18bdeb82a4109677a809d585b0c8","coverage_config_aarch64.json":"0704342dbbd99d078a309818ccf7d7ad266579b8860c6842cffedbe9f6f2d360","coverage_config_x86_64.json":"d448445ee92aa65d5b000433a1c67df320f2219d2f5bdc861f253085717fcbc8","src/address.rs":"452d68db3350d11662d3f0f4539c6b16a129b5c4feacc8a6506b0bcef8b3cffe","src/atomic.rs":"393bba5d8c72c369f20826cbf603468cbcc8c4087db1a3974f4187309a56009e
","src/atomic_integer.rs":"1f62802fc7415e3bc4ee475afcd7846b63f35a87f138e103232e7711af07d834","src/bitmap/backend/atomic_bitmap.rs":"c8fbb5bbf876339b78bb0fb3fc57744f1ac33d844807f82c241cb06f89cabc79","src/bitmap/backend/atomic_bitmap_arc.rs":"c22adafb7ba095d1eb042267f7ed9e6eb0ab98202cb6f0dbf74167d72c925627","src/bitmap/backend/mod.rs":"bdfb9e91bae0328e7aa19f4ece70f7a18999836a7e95d084bf2c0945d0c15919","src/bitmap/backend/slice.rs":"1285932f4095d1ad20d56888b9664f65498517f20ea6eb978911e4d5e4011407","src/bitmap/mod.rs":"216fa29482e98526b6a4ebbb72d7ba962358c820ecbdb478e236df935e2d44ee","src/bytes.rs":"03a3f58c22848fafdbcd99abef5a9ce310dbd97a530499ccb9afa6be190f4d59","src/endian.rs":"5e630d9be4dda02161d5367abdf7b4289d114b240f31fb13d3018af8b19502b4","src/guest_memory.rs":"ec503eba34810309d9e8a0190522d6bb4dea4afab8744532bfa7e53eb097e1ab","src/io.rs":"b35a5d965e8417340acb616a5bbbb1c7393959f61be1ba32f0f120b96839a1f3","src/lib.rs":"145bc43cd01f941e60a995ac1311ad7830f11fa1b79ad6768d0bfaef4987399c","src/mmap.rs":"06f5b55b7845966dc94a3d9584dc278d3a9d107b33c67486dc5b089d5921df20","src/mmap_unix.rs":"6191ecfd522a4c834b88bb8c7e75a0ed9667cc61f2a007821545b1bb703a7b4f","src/mmap_windows.rs":"9a465706f8501e150b1f188526f358e38a0cd8784e443746b289d8c029058591","src/mmap_xen.rs":"6ea56e1334e35cd7d07eca47e444790605e186c0d036232c37cb742317d28d3f","src/volatile_memory.rs":"9796ecaf8a927e30b650138589f0888750a51bc4e98c743ed856b317f93067eb"}}
\ No newline at end of file
diff --git a/crates/vm-memory/.cargo-checksum.json b/crates/vm-memory/.cargo-checksum.json
index 9b35a58..8deb5db 100644
--- a/crates/vm-memory/.cargo-checksum.json
+++ b/crates/vm-memory/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"68b774d7290cc0864d789ce9e1b8f85048a9a95beed248847172167a67cdb935","CODEOWNERS":"3969d4fa52acf29819098633aedfb2d46ccdb5b721357cecd7e7fdd884fa4b1b","Cargo.toml":"d3281fc40ad8c5892c1fcafd6b4aa9b59bde07fdc97874871999cf71b685e511","DESIGN.md":"392add56a05e8bf9ab7e527c52640054bd6a8cdb0780d00331d01617227d0b80","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"4db218bca015391e49025ada0362b9c4b296fb053df362a7533dc98da02439a0","TODO.md":"c844f03be6631843e90d13b3410df031b07ee16db4a3c7cbda7e89557e9be46b","benches/guest_memory.rs":"ea80023f341dac6ff5222f5e5c5f60b4ba0a56f149e9d8039250d088153f87d6","benches/main.rs":"1d29d29baf2f9dc2e00af68102eb868451acb6692b836c2f6ce92e58cff46a5f","benches/mmap/mod.rs":"d6eea32eee47b001060464de45348266c87430eeba60c77c41f62d4b68b54728","benches/volatile.rs":"cd3f8d8413498a570884b02587555efd94ed7caaac1a463e2f7013cbb11b9541","coverage_config_aarch64.json":"ca288852cecd24c77ab3a790f35eb621f3c7b9e9e18014f71776a37530ca0b8d","coverage_config_x86_64.json":"576a6fed2a3675181987087a2f12ca4727f58b50b25aa5bce373d2daf96c35a3","src/address.rs":"dee1e39710d17fdc30a7f562f2fe066a98d7edd88348911e65f6a6a98d1ccabe","src/atomic.rs":"592b73ed462aa2c9ad785c85a75b2b66bba44e88365a1006abab02063f79201d","src/atomic_integer.rs":"42f5262e2a1cef6683a0e62fdfb5f2f3873ef4284a6c0f8882dd34b0f076678d","src/bitmap/backend/atomic_bitmap.rs":"8c5846180c8e6fd209a7a60b79b628ddccf0af9dfb3da2106b59b16e782daff8","src/bitmap/backend/atomic_bitmap_arc.rs":"de4ff032f63f62b80e5de5657e435431ed8433c2f4f307ab040dc5b11884ee9e","src/bitmap/backend/mod.rs":"efdd3652c4050f52b3a9746520d67e23a57051dd57ed79118bc0e5cdbe1d3fb9","src/bitmap/backend/slice.rs":"4d20c7df4811e337216c720281181200516119d455c94132915c1e72aa7ea747","src/bitmap/mod.rs":"1926c28c659f3a2e2501f242e91d41a0e9a3c785fa989cc497eb431fbd4689fa","src/bytes.rs":"23e2f8d57ccdca8319be
ee0deb2cff0d43ed4fc171ffc074b0aec87f72271c40","src/endian.rs":"d954021a4640d8d2c35759466a910ac4d0e477d6cdda7c01d09f8c0fd1d51e8f","src/guest_memory.rs":"fd280efb0d0beaf7f93ca4ebd8463d129669f495e21f814674cd8a15445d4115","src/lib.rs":"18fa835266e378e87b9868e41702cbb9191c9f1db231f2d03f86e9fe9d1ada7d","src/mmap.rs":"a1bdb40c5b0bc3a68dba528ea237c17fbc1fd9e25bc623f7a7e978dfe9fb40d6","src/mmap_unix.rs":"7c07b17ff4f6892c295e395d262c1322def876958fa2636558040ebc2a6a2485","src/mmap_windows.rs":"df7dd2061903c294b21b404f0d6cb118fac36f1b64f59a53c0d002a129df86e4","src/mmap_xen.rs":"e8c4ee998dae5bcb1dcd442b35cf75817a0926d52870010b63b10176b804d22e","src/volatile_memory.rs":"f1c03e5e860004112287bacb5f295e6294ce684628a60b572b7ea055dbf0c3dd"},"package":"9dc276f0d00c17b9aeb584da0f1e1c673df0d183cc2539e3636ec8cbc5eae99b"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"e18756cf3d6b703a02d5cbb23c02275ee634b2aaed43f103e69e51146452d912","CODEOWNERS":"3969d4fa52acf29819098633aedfb2d46ccdb5b721357cecd7e7fdd884fa4b1b","Cargo.toml":"cf1789b28492fc501307a8f29ba11a8752d63150f7f718ebb9f6233d322e41b1","DESIGN.md":"35e9671628e330fbd62a0f00b8726cb09c82bda4779834a1b762ed9036f9330f","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"f9fd1d519777a1a052aaa11e54a79c0dc1781379d2406f800c10ed04283a20cd","TODO.md":"275c82047e4a1a1f8db0787839645220f11ca376776cb979fdb0ecb0a33fe781","benches/guest_memory.rs":"ea80023f341dac6ff5222f5e5c5f60b4ba0a56f149e9d8039250d088153f87d6","benches/main.rs":"1d29d29baf2f9dc2e00af68102eb868451acb6692b836c2f6ce92e58cff46a5f","benches/mmap/mod.rs":"39a71e023286ee63a5553325ea2815cf3e1390daa363708b82baaecacb468173","benches/volatile.rs":"cd3f8d8413498a570884b02587555efd94ed7caaac1a463e2f7013cbb11b9541","coverage_config_aarch64.json":"ca288852cecd24c77ab3a790f35eb621f3c7b9e9e18014f71776a37530ca0b8d","coverage_config_x86_64.json":"f4e604310360cbd48fd22f7ca1c63d53595f7fed44f3c1ccf4d112f5b5d2e52d","src/address.rs":"e73a5650d3bbbaa70b2d3fc90ef2fc226108750bcb5f119b6ab7629bab635946","src/atomic.rs":"ab89e700b3c3df62c4c4222c54e7d402d4a4fd6741988202233fabc5c594ad40","src/atomic_integer.rs":"470c14fc9c9f8a2425d4d1c78c638a41066564288f9c9fae8f83056e03589b86","src/bitmap/backend/atomic_bitmap.rs":"5ce709ff8b37d8d52201d6c5cb67e5a5a28a71c1011225ced4ddf42f8f3f2684","src/bitmap/backend/atomic_bitmap_arc.rs":"62dc5f1c6038b2a3c93566d55a3eb8e08941c033d5e3942f677097ae4f2b375c","src/bitmap/backend/mod.rs":"46918db372ee74f6fa510cc689c8d394e1821d568d388500259a2a16417b7ddc","src/bitmap/backend/slice.rs":"96d9f08fc7b425dd25fe83c048f73c89afb56776b2922f835c8ec22f83bf1ff3","src/bitmap/mod.rs":"a16b8c92da33154279853aa023f4cfc16a1af8da16a75f02f99cb14e691944be","src/bytes.rs":"22e8c7eb1a70da4ff90b
0cc4bab5c55e6239dfacc33df244c47e0cbc0b2114bb","src/endian.rs":"d954021a4640d8d2c35759466a910ac4d0e477d6cdda7c01d09f8c0fd1d51e8f","src/guest_memory.rs":"bfeeb5c0796c96243d18cfe2a762c3856807b6410f0c141c3938e29004891b3d","src/io.rs":"70c4e26283808aaf7dc889722c13f361898c10ba8c70853881bcaff9c264ce43","src/lib.rs":"e43516bb2e1b1c4c70d45c325952c6c636bb3ea3175154d923908b97cfc1be48","src/mmap.rs":"29db8a95bcc95d1042fe1b63ac60d564855aadaacdc99d661f3ef521374be104","src/mmap_unix.rs":"f6aa6869492c476936124f926afa6a0d3a30c747e7f14343a69f99c8cee952e3","src/mmap_windows.rs":"df7dd2061903c294b21b404f0d6cb118fac36f1b64f59a53c0d002a129df86e4","src/mmap_xen.rs":"a2ea095dd282e66ef04fce226cea8d0803f690a252ef4db19b2851d2d1ab593d","src/volatile_memory.rs":"13d74fecf1e8c0f8c0009117c51bce6d6ebe9ec1bcc1f930ee6863e17b969d2e"},"package":"e2919f87420b6998a131eb7c78843890295e91a3f8f786ccc925c8d387b75121"}
\ No newline at end of file
diff --git a/crates/vm-memory/Android.bp b/crates/vm-memory/Android.bp
index 030e264..53ebc1c 100644
--- a/crates/vm-memory/Android.bp
+++ b/crates/vm-memory/Android.bp
@@ -17,7 +17,7 @@
name: "libvm_memory_android",
crate_name: "vm_memory",
cargo_env_compat: true,
- cargo_pkg_version: "0.12.2",
+ cargo_pkg_version: "0.16.0",
crate_root: "src/lib.rs",
edition: "2021",
features: [
@@ -32,4 +32,14 @@
"liblibc",
"libthiserror",
],
+ compile_multilib: "first",
+ arch: {
+ arm64: {
+ enabled: true,
+ },
+ x86_64: {
+ enabled: true,
+ },
+ },
+
}
diff --git a/crates/vm-memory/CHANGELOG.md b/crates/vm-memory/CHANGELOG.md
index a70cb03..2ce495c 100644
--- a/crates/vm-memory/CHANGELOG.md
+++ b/crates/vm-memory/CHANGELOG.md
@@ -1,153 +1,235 @@
# Changelog
-## [v0.12.2]
+## Upcoming version
+
+## \[v0.16.0\]
+
+### Added
+
+- \[[#287](https://github.com/rust-vmm/vm-memory/pull/287)\] Support for RISC-V 64-bit platform.
+- \[[#299](https://github.com/rust-vmm/vm-memory/pull/299)\] atomic_bitmap: support enlarging the bitmap.
+
+### Changed
+
+- \[[#278](https://github.com/rust-vmm/vm-memory/pull/278) Remove `GuestMemoryIterator` trait,
+ and instead have GuestMemory::iter() return `impl Iterator`.
+
+## \[v0.15.0\]
+
+### Added
+
+- \[[#270](https://github.com/rust-vmm/vm-memory/pull/270)\] atomic_bitmap: add capability to reset bits range
+- \[[#285](https://github.com/rust-vmm/vm-memory/pull/285)\] Annotated modules in lib.rs to indicate their feature
+ dependencies such that it is reflected in the docs, enhancing documentation clarity for users.
+
+### Changed
+
+- \[[#275](https://github.com/rust-vmm/vm-memory/pull/275)\] Fail builds on non 64-bit platforms.
### Fixed
-- [[#251]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks
+
+- \[[#279](https://github.com/rust-vmm/vm-memory/pull/279)\] Remove restriction from `read_volatile_from` and `write_volatile_into`
+ that made it copy data it chunks of 4096.
+
+### Removed
+
+### Deprecated
+
+## \[v0.14.0\]
+
+### Added
+
+- \[[#266](https://github.com/rust-vmm/vm-memory/pull/266)\] Derive `Debug` for several
+ types that were missing it.
+
+### Changed
+
+- \[[#274](https://github.com/rust-vmm/vm-memory/pull/274)\] Drop `Default` as requirement for `ByteValued`.
+
+## \[v0.13.1\]
+
+### Added
+
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `std::io::Stdout`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `std::vec::Vec`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `Cursor<&mut [u8]>`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `ReadVolatile`
+ for `Cursor<T: AsRef[u8]>`.
+
+## \[v0.13.0\]
+
+### Added
+
+- [\[#247\]](https://github.com/rust-vmm/vm-memory/pull/247) Add `ReadVolatile` and
+ `WriteVolatile` traits which are equivalents of `Read`/`Write` with volatile
+ access semantics.
+
+### Changed
+
+- [\[#247\]](https://github.com/rust-vmm/vm-memory/pull/247) Deprecate
+ `Bytes::{read_from, read_exact_from, write_to, write_all_to}`. Instead use
+ `ReadVolatile`/`WriteVolatile`, which do not incur the performance penalty
+ of copying to hypervisor memory due to `Read`/`Write` being incompatible
+ with volatile semantics (see also #217).
+
+## \[v0.12.2\]
+
+### Fixed
+
+- [\[#251\]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks
that verify that the value returned by `VolatileMemory::get_slice` is of
the correct length.
### Deprecated
-- [[#244]](https://github.com/rust-vmm/vm-memory/pull/241) Deprecate volatile
+
+- [\[#244\]](https://github.com/rust-vmm/vm-memory/pull/241) Deprecate volatile
memory's `as_ptr()` interfaces. The new interfaces to be used instead are:
`ptr_guard()` and `ptr_guard_mut()`.
-## [v0.12.1]
+## \[v0.12.1\]
### Fixed
-- [[#241]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop
+
+- [\[#241\]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop
the FileOffset while in use #245
-## [v0.12.0]
+## \[v0.12.0\]
### Added
-- [[#241]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory
+
+- [\[#241\]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory
mapping support: Foreign and Grant. Add new API for accessing pointers to
volatile slices, as `as_ptr()` can't be used with Xen's Grant mapping.
-- [[#237]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`.
+- [\[#237\]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`.
-## [v0.11.0]
+## \[v0.11.0\]
### Added
-- [[#216]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`.
+
+- [\[#216\]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`.
### Fixed
-- [[#217]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally
- taking rust-style slices to guest memory in ways that could potentially cause
+
+- [\[#217\]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally
+ taking rust-style slices to guest memory in ways that could potentially cause
undefined behavior. Removes/deprecates various `as_slice`/`as_slice_mut` methods
- whose usage violated rust's aliasing rules, as well as an unsound
+ whose usage violated rust's aliasing rules, as well as an unsound
`impl<'a> VolatileMemory for &'a mut [u8]`.
-## [v0.10.0]
+## \[v0.10.0\]
### Changed
-- [[#208]](https://github.com/rust-vmm/vm-memory/issues/208) Updated
+
+- [\[#208\]](https://github.com/rust-vmm/vm-memory/issues/208) Updated
vmm-sys-util dependency to v0.11.0
-- [[#203]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust
+- [\[#203\]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust
edition 2021.
-## [v0.9.0]
+## \[v0.9.0\]
### Fixed
-- [[#195]](https://github.com/rust-vmm/vm-memory/issues/195):
+- [\[#195\]](https://github.com/rust-vmm/vm-memory/issues/195):
`mmap::check_file_offset` is doing the correct size validation for block and
char devices as well.
### Changed
-- [[#198]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64
+- [\[#198\]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64
bit atomics on ppc64le and s390x.
-- [[#200]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all
+- [\[#200\]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all
features in `docs.rs`.
-- [[#199]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way
+- [\[#199\]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way
the dependencies are pulled such that we don't end up with incompatible
versions.
-## [v0.8.0]
+## \[v0.8.0\]
### Fixed
-- [[#190]](https://github.com/rust-vmm/vm-memory/pull/190):
+- [\[#190\]](https://github.com/rust-vmm/vm-memory/pull/190):
`VolatileSlice::read/write` when input slice is empty.
-## [v0.7.0]
+## \[v0.7.0\]
### Changed
-- [[#176]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait
+- [\[#176\]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait
bounds of `Bytes` auto impl for `T: GuestMemory`
-- [[#178]](https://github.com/rust-vmm/vm-memory/pull/178):
+- [\[#178\]](https://github.com/rust-vmm/vm-memory/pull/178):
`MmapRegion::build_raw` no longer requires that the length of the region is a
multiple of the page size.
-## [v0.6.0]
+## \[v0.6.0\]
### Added
- - [[#160]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap
- backend implementations.
- - [[#149]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion.
- - [[#140]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions.
+- [\[#160\]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap
+ backend implementations.
+- [\[#149\]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion.
+- [\[#140\]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions.
-### Deprecated
+### Deprecated
- - [[#133]](https://github.com/rust-vmm/vm-memory/issues/8): Deprecate `GuestMemory::with_regions()`,
- `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`.
+- [\[#133\]](https://github.com/rust-vmm/vm-memory/issues/8): Deprecate `GuestMemory::with_regions()`,
+ `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`.
-## [v0.5.0]
+## \[v0.5.0\]
### Added
-- [[#8]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator
-- [[#120]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion
-- [[#126]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at()
-- [[#128]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice()
+- [\[#8\]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator
+- [\[#120\]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion
+- [\[#126\]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at()
+- [\[#128\]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice()
-## [v0.4.0]
+## \[v0.4.0\]
### Fixed
-- [[#100]](https://github.com/rust-vmm/vm-memory/issues/100): Performance
+- [\[#100\]](https://github.com/rust-vmm/vm-memory/issues/100): Performance
degradation after fixing [#95](https://github.com/rust-vmm/vm-memory/pull/95).
-- [[#122]](https://github.com/rust-vmm/vm-memory/pull/122): atomic,
+- [\[#122\]](https://github.com/rust-vmm/vm-memory/pull/122): atomic,
Cargo.toml: Update for arc-swap 1.0.0.
-## [v0.3.0]
+## \[v0.3.0\]
### Added
-- [[#109]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to
+- [\[#109\]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to
`MmapRegion` which can be used to operate on externally created mappings.
-- [[#101]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for
+- [\[#101\]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for
GuestMemory which could be used to validate a range of guest memory.
-- [[#115]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic
+- [\[#115\]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic
access to `Bytes`.
### Fixed
-- [[#93]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using
+- [\[#93\]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using
virtio with rust-vmm/vm-memory.
-- [[#106]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger
- on zero-length access.
+- [\[#106\]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger
+ on zero-length access.
### Removed
- `integer-atomics` is no longer a distinct feature of the crate.
-## [v0.2.0]
+## \[v0.2.0\]
### Added
-- [[#76]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and
+- [\[#76\]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and
`as_volatile_slice` to `GuestMemoryRegion`.
-- [[#82]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound
+- [\[#82\]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound
for `GuestAddressSpace::T`, the return value of `GuestAddressSpace::memory()`.
-- [[#88]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for
+- [\[#88\]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for
`ByteValued` which can be used for reading into POD structures from
raw bytes.
-## [v0.1.0]
+## \[v0.1.0\]
### Added
diff --git a/crates/vm-memory/Cargo.toml b/crates/vm-memory/Cargo.toml
index 5081971..dbc90da 100644
--- a/crates/vm-memory/Cargo.toml
+++ b/crates/vm-memory/Cargo.toml
@@ -12,8 +12,12 @@
[package]
edition = "2021"
name = "vm-memory"
-version = "0.12.2"
+version = "0.16.0"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
autobenches = false
description = "Safe abstractions for accessing the VM physical memory"
readme = "README.md"
@@ -24,13 +28,22 @@
[package.metadata.docs.rs]
all-features = true
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+]
[profile.bench]
lto = true
codegen-units = 1
+[lib]
+name = "vm_memory"
+path = "src/lib.rs"
+
[[bench]]
name = "main"
+path = "benches/main.rs"
harness = false
[dependencies.arc-swap]
@@ -38,7 +51,7 @@
optional = true
[dependencies.bitflags]
-version = "1.0"
+version = "2.4.0"
optional = true
[dependencies.libc]
@@ -48,17 +61,17 @@
version = "1.0.40"
[dependencies.vmm-sys-util]
-version = "0.11.0"
+version = "0.12.1"
optional = true
[dev-dependencies.criterion]
-version = "0.3.0"
+version = "0.5.0"
[dev-dependencies.matches]
version = "0.1.0"
[dev-dependencies.vmm-sys-util]
-version = "0.11.0"
+version = "0.12.1"
[features]
backend-atomic = ["arc-swap"]
diff --git a/crates/vm-memory/DESIGN.md b/crates/vm-memory/DESIGN.md
index 1e420e8..5915f50 100644
--- a/crates/vm-memory/DESIGN.md
+++ b/crates/vm-memory/DESIGN.md
@@ -39,7 +39,7 @@
- [Abstraction of Address Space](#abstraction-of-address-space)
- [Specialization for Virtual Machine Physical Address Space](#specialization-for-virtual-machine-physical-address-space)
-- [Backend Implementation Based on `mmap`](#backend-implementation-based-on-`mmap`)
+- [Backend Implementation Based on `mmap`](#backend-implementation-based-on-mmap)
- [Utilities and helpers](#utilities-and-helpers)
### Address Space Abstraction
@@ -48,9 +48,9 @@
with addresses as follows:
- `AddressValue`: stores the raw value of an address. Typically `u32`, `u64` or
- `usize` are used to store the raw value. Pointers such as `*u8`, can not be
- used as an implementation of `AddressValue` because the `Add` and `Sub`
- traits are not implemented for that type.
+ `usize` are used to store the raw value. Pointers such as `*u8`, can not be
+ used as an implementation of `AddressValue` because the `Add` and `Sub`
+ traits are not implemented for that type.
- `Address`: implementation of `AddressValue`.
- `Bytes`: trait for volatile access to memory. The `Bytes` trait can be
parameterized with types that represent addresses, in order to enforce that
diff --git a/crates/vm-memory/METADATA b/crates/vm-memory/METADATA
index d76b1ba..dc0d980 100644
--- a/crates/vm-memory/METADATA
+++ b/crates/vm-memory/METADATA
@@ -1,17 +1,17 @@
name: "vm-memory"
description: "Safe abstractions for accessing the VM physical memory"
third_party {
- version: "0.12.2"
+ version: "0.16.0"
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 9
- day: 6
+ year: 2025
+ month: 1
+ day: 4
}
homepage: "https://crates.io/crates/vm-memory"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/vm-memory/vm-memory-0.12.2.crate"
- version: "0.12.2"
+ value: "https://static.crates.io/crates/vm-memory/vm-memory-0.16.0.crate"
+ version: "0.16.0"
}
}
diff --git a/crates/vm-memory/README.md b/crates/vm-memory/README.md
index 07e55ee..b390caf 100644
--- a/crates/vm-memory/README.md
+++ b/crates/vm-memory/README.md
@@ -18,7 +18,7 @@
### Platform Support
-- Arch: x86, AMD64, ARM64
+- Arch: x86_64, ARM64, RISCV64
- OS: Linux/Unix/Windows
### Xen support
diff --git a/crates/vm-memory/TODO.md b/crates/vm-memory/TODO.md
index e52bb07..3552f7e 100644
--- a/crates/vm-memory/TODO.md
+++ b/crates/vm-memory/TODO.md
@@ -1,3 +1,4 @@
### TODO List
+
- Abstraction layer to seperate VM memory management from VM memory accessor.
- Help needed to refine documentation and usage examples.
diff --git a/crates/vm-memory/benches/mmap/mod.rs b/crates/vm-memory/benches/mmap/mod.rs
index ed15e18..bbf3ab3 100644
--- a/crates/vm-memory/benches/mmap/mod.rs
+++ b/crates/vm-memory/benches/mmap/mod.rs
@@ -8,7 +8,6 @@
extern crate vm_memory;
use std::fs::{File, OpenOptions};
-use std::io::Cursor;
use std::mem::size_of;
use std::path::Path;
@@ -105,7 +104,7 @@
c.bench_function(format!("read_from_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_from(address, &mut Cursor::new(&image), ACCESS_SIZE)
+ .read_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -113,7 +112,7 @@
c.bench_function(format!("read_from_file_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_from(address, &mut file, ACCESS_SIZE)
+ .read_volatile_from(address, &mut file, ACCESS_SIZE)
.unwrap()
})
});
@@ -121,7 +120,7 @@
c.bench_function(format!("read_exact_from_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_exact_from(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .read_exact_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -154,7 +153,7 @@
c.bench_function(format!("write_to_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .write_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -162,7 +161,7 @@
c.bench_function(format!("write_to_file_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_to(address, &mut file_to_write, ACCESS_SIZE)
+ .write_volatile_to(address, &mut file_to_write, ACCESS_SIZE)
.unwrap()
})
});
@@ -170,7 +169,7 @@
c.bench_function(format!("write_exact_to_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_all_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .write_all_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE)
.unwrap()
})
});
diff --git a/crates/vm-memory/cargo2android_module.bp.fragment b/crates/vm-memory/cargo2android_module.bp.fragment
new file mode 100644
index 0000000..04a9ab3
--- /dev/null
+++ b/crates/vm-memory/cargo2android_module.bp.fragment
@@ -0,0 +1,8 @@
+arch: {
+ arm64: {
+ enabled: true,
+ },
+ x86_64: {
+ enabled: true,
+ },
+}
diff --git a/crates/vm-memory/cargo_embargo.json b/crates/vm-memory/cargo_embargo.json
index 087fd61..ef818be 100644
--- a/crates/vm-memory/cargo_embargo.json
+++ b/crates/vm-memory/cargo_embargo.json
@@ -11,7 +11,9 @@
},
"package": {
"vm-memory": {
- "device_supported": false
+ "device_supported": false,
+ "host_first_multilib": true,
+ "add_module_block": "cargo2android_module.bp.fragment"
}
},
"run_cargo": false
diff --git a/crates/vm-memory/coverage_config_x86_64.json b/crates/vm-memory/coverage_config_x86_64.json
index a6a09c4..003b2d7 100644
--- a/crates/vm-memory/coverage_config_x86_64.json
+++ b/crates/vm-memory/coverage_config_x86_64.json
@@ -1,5 +1,5 @@
{
- "coverage_score": 92.2,
+ "coverage_score": 89.16,
"exclude_path": "mmap_windows.rs",
"crate_features": "backend-mmap,backend-atomic,backend-bitmap"
}
diff --git a/crates/vm-memory/src/address.rs b/crates/vm-memory/src/address.rs
index 350a186..639e226 100644
--- a/crates/vm-memory/src/address.rs
+++ b/crates/vm-memory/src/address.rs
@@ -12,11 +12,11 @@
//!
//! Two traits are defined to represent an address within an address space:
//! - [`AddressValue`](trait.AddressValue.html): stores the raw value of an address. Typically
-//! `u32`,`u64` or `usize` is used to store the raw value. But pointers, such as `*u8`, can't be used
-//! because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html) and
-//! [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits.
+//! `u32`,`u64` or `usize` is used to store the raw value. But pointers, such as `*u8`, can't be used
+//! because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html) and
+//! [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits.
//! - [Address](trait.Address.html): encapsulates an [`AddressValue`](trait.AddressValue.html)
-//! object and defines methods to access and manipulate it.
+//! object and defines methods to access and manipulate it.
use std::cmp::{Eq, Ord, PartialEq, PartialOrd};
use std::fmt::Debug;
@@ -243,7 +243,7 @@
#[test]
fn test_new() {
assert_eq!(MockAddress::new(0), MockAddress(0));
- assert_eq!(MockAddress::new(std::u64::MAX), MockAddress(std::u64::MAX));
+ assert_eq!(MockAddress::new(u64::MAX), MockAddress(u64::MAX));
}
#[test]
@@ -285,7 +285,7 @@
Some(MockAddress(0x130))
);
assert_eq!(
- MockAddress::new(std::u64::MAX - 0x3fff).checked_align_up(0x10000),
+ MockAddress::new(u64::MAX - 0x3fff).checked_align_up(0x10000),
None
);
}
@@ -343,10 +343,10 @@
// normal case
check_add(10, 10, false, 20);
// edge case
- check_add(std::u64::MAX - 1, 1, false, std::u64::MAX);
+ check_add(u64::MAX - 1, 1, false, u64::MAX);
// with overflow
- check_add(std::u64::MAX, 1, true, 0);
+ check_add(u64::MAX, 1, true, 0);
}
fn check_sub(a: u64, b: u64, expected_overflow: bool, expected_result: u64) {
@@ -384,7 +384,7 @@
check_sub(1, 1, false, 0);
// with underflow
- check_sub(0, 1, true, std::u64::MAX);
+ check_sub(0, 1, true, u64::MAX);
}
#[test]
diff --git a/crates/vm-memory/src/atomic.rs b/crates/vm-memory/src/atomic.rs
index ae10224..4b20b2c 100644
--- a/crates/vm-memory/src/atomic.rs
+++ b/crates/vm-memory/src/atomic.rs
@@ -124,6 +124,7 @@
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// `GuestMemoryAtomic` that created the guard.
+#[derive(Debug)]
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
parent: &'a GuestMemoryAtomic<M>,
_guard: MutexGuard<'a, ()>,
diff --git a/crates/vm-memory/src/atomic_integer.rs b/crates/vm-memory/src/atomic_integer.rs
index 1b55c81..72ebc48 100644
--- a/crates/vm-memory/src/atomic_integer.rs
+++ b/crates/vm-memory/src/atomic_integer.rs
@@ -60,7 +60,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64);
@@ -71,7 +72,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64);
diff --git a/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs b/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
index b3340c3..b163043 100644
--- a/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
+++ b/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
@@ -3,6 +3,7 @@
//! Bitmap backend implementation based on atomic integers.
+use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicU64, Ordering};
use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice};
@@ -17,30 +18,36 @@
pub struct AtomicBitmap {
map: Vec<AtomicU64>,
size: usize,
- page_size: usize,
+ byte_size: usize,
+ page_size: NonZeroUsize,
}
#[allow(clippy::len_without_is_empty)]
impl AtomicBitmap {
/// Create a new bitmap of `byte_size`, with one bit per page. This is effectively
/// rounded up, and we get a new vector of the next multiple of 64 bigger than `bit_size`.
- pub fn new(byte_size: usize, page_size: usize) -> Self {
- let mut num_pages = byte_size / page_size;
- if byte_size % page_size > 0 {
- num_pages += 1;
- }
-
- // Adding one entry element more just in case `num_pages` is not a multiple of `64`.
- let map_size = num_pages / 64 + 1;
+ pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self {
+ let num_pages = byte_size.div_ceil(page_size.get());
+ let map_size = num_pages.div_ceil(u64::BITS as usize);
let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();
AtomicBitmap {
map,
size: num_pages,
+ byte_size,
page_size,
}
}
+ /// Enlarge this bitmap with enough bits to track `additional_size` additional bytes at page granularity.
+ /// New bits are initialized to zero.
+ pub fn enlarge(&mut self, additional_size: usize) {
+ self.byte_size += additional_size;
+ self.size = self.byte_size.div_ceil(self.page_size.get());
+ let map_size = self.size.div_ceil(u64::BITS as usize);
+ self.map.resize_with(map_size, Default::default);
+ }
+
/// Is bit `n` set? Bits outside the range of the bitmap are always unset.
pub fn is_bit_set(&self, index: usize) -> bool {
if index < self.size {
@@ -60,6 +67,14 @@
/// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
/// to address `start_addr + len - 1`.
pub fn set_addr_range(&self, start_addr: usize, len: usize) {
+ self.set_reset_addr_range(start_addr, len, true);
+ }
+
+ // Set/Reset a range of `len` bytes starting at `start_addr`
+ // reset parameter determines whether bit will be set/reset
+ // if set is true then the range of bits will be set to one,
+ // otherwise zero
+ fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) {
// Return early in the unlikely event that `len == 0` so the `len - 1` computation
// below does not underflow.
if len == 0 {
@@ -75,15 +90,49 @@
// Attempts to set bits beyond the end of the bitmap are simply ignored.
break;
}
- self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
+ if set {
+ self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
+ } else {
+ self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst);
+ }
}
}
+ /// Reset a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap
+ /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
+ /// to address `start_addr + len - 1`.
+ pub fn reset_addr_range(&self, start_addr: usize, len: usize) {
+ self.set_reset_addr_range(start_addr, len, false);
+ }
+
+ /// Set bit to corresponding index
+ pub fn set_bit(&self, index: usize) {
+ if index >= self.size {
+ // Attempts to set bits beyond the end of the bitmap are simply ignored.
+ return;
+ }
+ self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst);
+ }
+
+ /// Reset bit to corresponding index
+ pub fn reset_bit(&self, index: usize) {
+ if index >= self.size {
+ // Attempts to reset bits beyond the end of the bitmap are simply ignored.
+ return;
+ }
+ self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst);
+ }
+
/// Get the length of the bitmap in bits (i.e. in how many pages it can represent).
pub fn len(&self) -> usize {
self.size
}
+ /// Get the size in bytes i.e how many bytes the bitmap can represent, one bit per page.
+ pub fn byte_size(&self) -> usize {
+ self.byte_size
+ }
+
/// Atomically get and reset the dirty page bitmap.
pub fn get_and_reset(&self) -> Vec<u64> {
self.map
@@ -111,6 +160,7 @@
AtomicBitmap {
map,
size: self.size,
+ byte_size: self.byte_size,
page_size: self.page_size,
}
}
@@ -136,38 +186,35 @@
impl Default for AtomicBitmap {
fn default() -> Self {
- AtomicBitmap::new(0, 0x1000)
+ // SAFETY: Safe as `0x1000` is non-zero.
+ AtomicBitmap::new(0, unsafe { NonZeroUsize::new_unchecked(0x1000) })
}
}
#[cfg(feature = "backend-mmap")]
impl NewBitmap for AtomicBitmap {
fn with_len(len: usize) -> Self {
- let page_size;
-
#[cfg(unix)]
- {
- // SAFETY: There's no unsafe potential in calling this function.
- page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
- }
+ // SAFETY: There's no unsafe potential in calling this function.
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
#[cfg(windows)]
- {
+ let page_size = {
use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO};
-
- // It's safe to initialize this object from a zeroed memory region.
- let mut sysinfo: SYSTEM_INFO = unsafe { std::mem::zeroed() };
-
- // It's safe to call this method as the pointer is based on the address
- // of the previously initialized `sysinfo` object.
- unsafe { GetSystemInfo(&mut sysinfo as LPSYSTEM_INFO) };
-
- page_size = sysinfo.dwPageSize;
- }
+ let mut sysinfo = MaybeUninit::zeroed();
+ // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized
+ // allocated memory.
+ unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) };
+ // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`.
+ unsafe { sysinfo.assume_init().dwPageSize }
+ };
// The `unwrap` is safe to use because the above call should always succeed on the
// supported platforms, and the size of a page will always fit within a `usize`.
- AtomicBitmap::new(len, usize::try_from(page_size).unwrap())
+ AtomicBitmap::new(
+ len,
+ NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(),
+ )
}
}
@@ -177,13 +224,16 @@
use crate::bitmap::tests::test_bitmap;
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) };
+
#[test]
fn test_bitmap_basic() {
// Test that bitmap size is properly rounded up.
- let a = AtomicBitmap::new(1025, 128);
+ let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE);
assert_eq!(a.len(), 9);
- let b = AtomicBitmap::new(1024, 128);
+ let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
assert_eq!(b.len(), 8);
b.set_addr_range(128, 129);
assert!(!b.is_addr_set(0));
@@ -213,8 +263,25 @@
}
#[test]
+ fn test_bitmap_reset() {
+ let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
+ assert_eq!(b.len(), 8);
+ b.set_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.reset_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+ }
+
+ #[test]
fn test_bitmap_out_of_range() {
- let b = AtomicBitmap::new(1024, 1);
+ let b = AtomicBitmap::new(1024, NonZeroUsize::MIN);
// Set a partial range that goes beyond the end of the bitmap
b.set_addr_range(768, 512);
assert!(b.is_addr_set(768));
@@ -225,7 +292,47 @@
#[test]
fn test_bitmap_impl() {
- let b = AtomicBitmap::new(0x2000, 128);
+ let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE);
test_bitmap(&b);
}
+
+ #[test]
+ fn test_bitmap_enlarge() {
+ let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE);
+ assert_eq!(b.len(), 64);
+ b.set_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.reset_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+ b.set_addr_range(128, 129);
+ b.enlarge(8 * 1024);
+ for i in 65..128 {
+ assert!(!b.is_bit_set(i));
+ }
+ assert_eq!(b.len(), 128);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.set_bit(55);
+ assert!(b.is_bit_set(55));
+ for i in 65..128 {
+ b.set_bit(i);
+ }
+ for i in 65..128 {
+ assert!(b.is_bit_set(i));
+ }
+ b.reset_addr_range(0, 16 * 1024);
+ for i in 0..128 {
+ assert!(!b.is_bit_set(i));
+ }
+ }
}
diff --git a/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs b/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
index 3545623..7d52050 100644
--- a/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
+++ b/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
@@ -77,10 +77,14 @@
use super::*;
use crate::bitmap::tests::test_bitmap;
+ use std::num::NonZeroUsize;
#[test]
fn test_bitmap_impl() {
- let b = AtomicBitmapArc::new(AtomicBitmap::new(0x2000, 128));
+ // SAFETY: `128` is non-zero.
+ let b = AtomicBitmapArc::new(AtomicBitmap::new(0x800, unsafe {
+ NonZeroUsize::new_unchecked(128)
+ }));
test_bitmap(&b);
}
}
diff --git a/crates/vm-memory/src/bitmap/backend/mod.rs b/crates/vm-memory/src/bitmap/backend/mod.rs
index 256585e..8d2d866 100644
--- a/crates/vm-memory/src/bitmap/backend/mod.rs
+++ b/crates/vm-memory/src/bitmap/backend/mod.rs
@@ -6,5 +6,4 @@
mod slice;
pub use atomic_bitmap::AtomicBitmap;
-pub use atomic_bitmap_arc::AtomicBitmapArc;
pub use slice::{ArcSlice, RefSlice};
diff --git a/crates/vm-memory/src/bitmap/backend/slice.rs b/crates/vm-memory/src/bitmap/backend/slice.rs
index 913a2f5..383ce69 100644
--- a/crates/vm-memory/src/bitmap/backend/slice.rs
+++ b/crates/vm-memory/src/bitmap/backend/slice.rs
@@ -99,15 +99,16 @@
use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap};
use crate::bitmap::AtomicBitmap;
+ use std::num::NonZeroUsize;
#[test]
fn test_slice() {
- let bitmap_size = 0x1_0000;
- let dirty_offset = 0x1000;
+ let bitmap_size = 0x800;
+ let dirty_offset = 0x400;
let dirty_len = 0x100;
{
- let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN);
let slice1 = bitmap.slice_at(0);
let slice2 = bitmap.slice_at(dirty_offset);
@@ -121,7 +122,7 @@
}
{
- let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN);
let slice = bitmap.slice_at(0);
test_bitmap(&slice);
}
diff --git a/crates/vm-memory/src/bitmap/mod.rs b/crates/vm-memory/src/bitmap/mod.rs
index e8c0987..51318ed 100644
--- a/crates/vm-memory/src/bitmap/mod.rs
+++ b/crates/vm-memory/src/bitmap/mod.rs
@@ -140,10 +140,10 @@
}
// Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
- // an area of length at least 0x2000.
+ // an area of length at least 0x800.
pub fn test_bitmap<B: Bitmap>(b: &B) {
- let len = 0x2000;
- let dirty_offset = 0x1000;
+ let len = 0x800;
+ let dirty_offset = 0x400;
let dirty_len = 0x100;
// Some basic checks.
@@ -267,6 +267,7 @@
dirty_offset += step;
// Test `read_from`.
+ #[allow(deprecated)] // test of deprecated functions
h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
assert_eq!(
m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
@@ -277,6 +278,7 @@
dirty_offset += step;
// Test `read_exact_from`.
+ #[allow(deprecated)] // test of deprecated functions
h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
.unwrap()
diff --git a/crates/vm-memory/src/bytes.rs b/crates/vm-memory/src/bytes.rs
index 2430708..6274c3a 100644
--- a/crates/vm-memory/src/bytes.rs
+++ b/crates/vm-memory/src/bytes.rs
@@ -12,7 +12,7 @@
//! data.
use std::io::{Read, Write};
-use std::mem::size_of;
+use std::mem::{size_of, MaybeUninit};
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;
@@ -31,7 +31,7 @@
/// cause undefined behavior.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
-pub unsafe trait ByteValued: Copy + Default + Send + Sync {
+pub unsafe trait ByteValued: Copy + Send + Sync {
/// Converts a slice of raw data into a reference of `Self`.
///
/// The value of `data` is not copied. Instead a reference is made from the given slice. The
@@ -191,7 +191,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
@@ -202,7 +203,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
@@ -268,7 +270,10 @@
///
/// Returns an error if there's not enough data inside the container.
fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
- let mut result: T = Default::default();
+ // SAFETY: ByteValued objects must be assignable from a arbitrary byte
+ // sequence and are mandated to be packed.
+ // Hence, zeroed memory is a fine initialization.
+ let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
self.read_slice(result.as_mut_slice(), addr).map(|_| result)
}
@@ -280,6 +285,9 @@
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
+ #[deprecated(
+ note = "Use `.read_volatile_from` or the functions of the `ReadVolatile` trait instead"
+ )]
fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Read;
@@ -295,6 +303,9 @@
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy exactly `count` bytes from `src` into the container.
+ #[deprecated(
+ note = "Use `.read_exact_volatile_from` or the functions of the `ReadVolatile` trait instead"
+ )]
fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
where
F: Read;
@@ -307,6 +318,9 @@
/// * `addr` - Begin reading from this address.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
+ #[deprecated(
+ note = "Use `.write_volatile_to` or the functions of the `WriteVolatile` trait instead"
+ )]
fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Write;
@@ -322,6 +336,9 @@
/// * `addr` - Begin reading from this address.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy exactly `count` bytes from the container to `dst`.
+ #[deprecated(
+ note = "Use `.write_all_volatile_to` or the functions of the `WriteVolatile` trait instead"
+ )]
fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
where
F: Write;
@@ -510,11 +527,11 @@
fn test_bytes() {
let bytes = MockBytesContainer::new();
- assert!(bytes.write_obj(std::u64::MAX, 0).is_ok());
- assert_eq!(bytes.read_obj::<u64>(0).unwrap(), std::u64::MAX);
+ assert!(bytes.write_obj(u64::MAX, 0).is_ok());
+ assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);
assert!(bytes
- .write_obj(std::u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
+ .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
.is_err());
assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
}
diff --git a/crates/vm-memory/src/guest_memory.rs b/crates/vm-memory/src/guest_memory.rs
index ba615ef..98c68b7 100644
--- a/crates/vm-memory/src/guest_memory.rs
+++ b/crates/vm-memory/src/guest_memory.rs
@@ -19,12 +19,12 @@
//! Traits and Structs
//! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
//! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
-//! region.
+//! region.
//! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represent a continuous region of guest's
-//! physical memory.
+//! physical memory.
//! - [`GuestMemory`](trait.GuestMemory.html): represent a collection of `GuestMemoryRegion`
-//! objects.
-//! The main responsibilities of the `GuestMemory` trait are:
+//! objects.
+//! The main responsibilities of the `GuestMemory` trait are:
//! - hide the detail of accessing guest's physical address.
//! - map a request address to a `GuestMemoryRegion` object and relay the request to it.
//! - handle cases where an access request spanning two or more `GuestMemoryRegion` objects.
@@ -52,7 +52,9 @@
use crate::address::{Address, AddressValue};
use crate::bitmap::{Bitmap, BS, MS};
use crate::bytes::{AtomicAccess, Bytes};
+use crate::io::{ReadVolatile, WriteVolatile};
use crate::volatile_memory::{self, VolatileSlice};
+use crate::GuestMemoryError;
static MAX_ACCESS_CHUNK: usize = 4096;
@@ -75,6 +77,14 @@
/// Host virtual address not available.
#[error("Guest memory error: host virtual address not available")]
HostAddressNotAvailable,
+ /// The length returned by the callback passed to `try_access` is outside the address range.
+ #[error(
+ "The length returned by the callback passed to `try_access` is outside the address range."
+ )]
+ CallbackOutOfRange,
+ /// The address to be read by `try_access` is outside the address range.
+ #[error("The address to be read by `try_access` is outside the address range")]
+ GuestAddressOverflow,
}
impl From<volatile_memory::Error> for Error {
@@ -412,42 +422,6 @@
}
}
-/// Lifetime generic associated iterators. The actual iterator type is defined through associated
-/// item `Iter`, for example:
-///
-/// ```
-/// # use std::marker::PhantomData;
-/// # use vm_memory::guest_memory::GuestMemoryIterator;
-/// #
-/// // Declare the relevant Region and Memory types
-/// struct MyGuestRegion {/* fields omitted */}
-/// struct MyGuestMemory {/* fields omitted */}
-///
-/// // Make an Iterator type to iterate over the Regions
-/// # /*
-/// struct MyGuestMemoryIter<'a> {/* fields omitted */}
-/// # */
-/// # struct MyGuestMemoryIter<'a> {
-/// # _marker: PhantomData<&'a MyGuestRegion>,
-/// # }
-/// impl<'a> Iterator for MyGuestMemoryIter<'a> {
-/// type Item = &'a MyGuestRegion;
-/// fn next(&mut self) -> Option<&'a MyGuestRegion> {
-/// // ...
-/// # None
-/// }
-/// }
-///
-/// // Associate the Iter type with the Memory type
-/// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory {
-/// type Iter = MyGuestMemoryIter<'a>;
-/// }
-/// ```
-pub trait GuestMemoryIterator<'a, R: 'a> {
- /// Type of the `iter` method's return value.
- type Iter: Iterator<Item = &'a R>;
-}
-
/// `GuestMemory` represents a container for an *immutable* collection of
/// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes<GuestAddress>`
/// trait to hide the details of accessing guest memory by physical address.
@@ -461,9 +435,6 @@
/// Type of objects hosted by the address space.
type R: GuestMemoryRegion;
- /// Lifetime generic associated iterators. Usually this is just `Self`.
- type I: for<'a> GuestMemoryIterator<'a, Self::R>;
-
/// Returns the number of regions in the collection.
fn num_regions(&self) -> usize;
@@ -504,7 +475,7 @@
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
/// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the
- /// `backend-mmap` feature)
+ /// `backend-mmap` feature)
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
@@ -523,7 +494,7 @@
/// assert_eq!(3, total_size)
/// # }
/// ```
- fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;
+ fn iter(&self) -> impl Iterator<Item = &Self::R>;
/// Applies two functions, specified as callbacks, on the inner memory regions.
///
@@ -540,7 +511,7 @@
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
/// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the
- /// `backend-mmap` feature)
+ /// `backend-mmap` feature)
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
@@ -644,15 +615,15 @@
Ok(0) => return Ok(total),
// made some progress
Ok(len) => {
- total += len;
- if total == count {
- break;
- }
+ total = match total.checked_add(len) {
+ Some(x) if x < count => x,
+ Some(x) if x == count => return Ok(x),
+ _ => return Err(Error::CallbackOutOfRange),
+ };
cur = match cur.overflowing_add(len as GuestUsize) {
- (GuestAddress(0), _) => GuestAddress(0),
- (result, false) => result,
- (_, true) => panic!("guest address overflow"),
- }
+ (x @ GuestAddress(0), _) | (x, false) => x,
+ (_, true) => return Err(Error::GuestAddressOverflow),
+ };
}
// error happened
e => return e,
@@ -665,6 +636,143 @@
}
}
+ /// Reads up to `count` bytes from an object and writes them into guest memory at `addr`.
+ ///
+ /// Returns the number of bytes written into guest memory.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into the container.
+ /// * `count` - Copy `count` bytes from `src` into the container.
+ ///
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let addr = GuestAddress(0x1010);
+ /// # let mut file = if cfg!(unix) {
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ /// # file
+ /// # } else {
+ /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
+ /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
+ /// # };
+ ///
+ /// gm.read_volatile_from(addr, &mut file, 128)
+ /// .expect("Could not read from /dev/urandom into guest memory");
+ ///
+ /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
+ /// let rand_val: u32 = gm
+ /// .read_obj(read_addr)
+ /// .expect("Could not read u32 val from /dev/urandom");
+ /// # }
+ /// ```
+ fn read_volatile_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
+ where
+ F: ReadVolatile,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let mut vslice = region.get_slice(caddr, len)?;
+
+ src.read_volatile(&mut vslice)
+ .map_err(GuestMemoryError::from)
+ })
+ }
+
+ /// Reads up to `count` bytes from guest memory at `addr` and writes them it into an object.
+ ///
+ /// Returns the number of bytes copied from guest memory.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from guest memory to `dst`.
+ /// * `count` - Copy `count` bytes from guest memory to `dst`.
+ fn write_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
+ where
+ F: WriteVolatile,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let vslice = region.get_slice(caddr, len)?;
+
+ // For a non-RAM region, reading could have side effects, so we
+ // must use write_all().
+ dst.write_all_volatile(&vslice)?;
+
+ Ok(len)
+ })
+ }
+
+ /// Reads exactly `count` bytes from an object and writes them into guest memory at `addr`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't have been copied from `src` to guest memory.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into guest memory.
+ /// * `count` - Copy exactly `count` bytes from `src` into guest memory.
+ fn read_exact_volatile_from<F>(
+ &self,
+ addr: GuestAddress,
+ src: &mut F,
+ count: usize,
+ ) -> Result<()>
+ where
+ F: ReadVolatile,
+ {
+ let res = self.read_volatile_from(addr, src, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ /// Reads exactly `count` bytes from guest memory at `addr` and writes them into an object.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't have been copied from guest memory to `dst`.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from guest memory to `dst`.
+ /// * `count` - Copy exactly `count` bytes from guest memory to `dst`.
+ fn write_all_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
+ where
+ F: WriteVolatile,
+ {
+ let res = self.write_volatile_to(addr, dst, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
/// Get the host virtual address corresponding to the guest address.
///
/// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
@@ -856,6 +964,7 @@
where
F: Read,
{
+ #[allow(deprecated)] // this function itself is deprecated
let res = self.read_from(addr, src, count)?;
if res != count {
return Err(Error::PartialBuffer {
@@ -949,6 +1058,7 @@
where
F: Write,
{
+ #[allow(deprecated)] // this function itself is deprecated
let res = self.write_to(addr, dst, count)?;
if res != count {
return Err(Error::PartialBuffer {
@@ -983,8 +1093,6 @@
#[cfg(feature = "backend-mmap")]
use crate::GuestAddress;
#[cfg(feature = "backend-mmap")]
- use std::io::Cursor;
- #[cfg(feature = "backend-mmap")]
use std::time::{Duration, Instant};
use vmm_sys_util::tempfile::TempFile;
@@ -1024,7 +1132,7 @@
let count: usize = 0x20;
assert_eq!(
0x20_usize,
- mem.read_from(offset, &mut Cursor::new(&image), count)
+ mem.read_volatile_from(offset, &mut image.as_slice(), count)
.unwrap()
);
}
@@ -1179,19 +1287,24 @@
assert!(mem.write_obj(obj, addr).is_ok());
assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
- assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
-
- assert!(mem
- .read_exact_from(addr, &mut Cursor::new(&image), 0)
- .is_ok());
-
assert_eq!(
- mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
+ mem.read_volatile_from(addr, &mut image.as_slice(), 0)
+ .unwrap(),
0
);
assert!(mem
- .write_all_to(addr, &mut Cursor::new(&mut image), 0)
+ .read_exact_volatile_from(addr, &mut image.as_slice(), 0)
+ .is_ok());
+
+ assert_eq!(
+ mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0)
+ .unwrap(),
+ 0
+ );
+
+ assert!(mem
+ .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0)
.is_ok());
}
diff --git a/crates/vm-memory/src/io.rs b/crates/vm-memory/src/io.rs
new file mode 100644
index 0000000..ada941c
--- /dev/null
+++ b/crates/vm-memory/src/io.rs
@@ -0,0 +1,610 @@
+// Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//! Module containing versions of the standard library's [`Read`](std::io::Read) and
+//! [`Write`](std::io::Write) traits compatible with volatile memory accesses.
+
+use crate::bitmap::BitmapSlice;
+use crate::volatile_memory::copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
+use crate::{VolatileMemoryError, VolatileSlice};
+use std::io::{Cursor, ErrorKind, Stdout};
+use std::os::fd::AsRawFd;
+
+/// A version of the standard library's [`Read`](std::io::Read) trait that operates on volatile
+/// memory instead of slices
+///
+/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on
+/// guest memory [1].
+///
+/// [1]: https://github.com/rust-vmm/vm-memory/pull/217
+pub trait ReadVolatile {
+ /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes
+ /// were read.
+ ///
+ /// The behavior of implementations should be identical to [`Read::read`](std::io::Read::read)
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError>;
+
+ /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error
+ /// if insufficient bytes could be read.
+ ///
+ /// The default implementation is identical to that of [`Read::read_exact`](std::io::Read::read_exact)
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465
+
+ let mut partial_buf = buf.offset(0)?;
+
+ while !partial_buf.is_empty() {
+ match self.read_volatile(&mut partial_buf) {
+ Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
+ continue
+ }
+ Ok(0) => {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::UnexpectedEof,
+ "failed to fill whole buffer",
+ )))
+ }
+ Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?,
+ Err(err) => return Err(err),
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// A version of the standard library's [`Write`](std::io::Write) trait that operates on volatile
+/// memory instead of slices.
+///
+/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on
+/// guest memory [1].
+///
+/// [1]: https://github.com/rust-vmm/vm-memory/pull/217
+pub trait WriteVolatile {
+ /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes
+ /// were written.
+ ///
+ /// The behavior of implementations should be identical to [`Write::write`](std::io::Write::write)
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError>;
+
+ /// Tries to write the entire content of the given [`VolatileSlice`] buffer to `self`, returning an
+ /// error if not all bytes could be written.
+ ///
+ /// The default implementation is identical to that of [`Write::write_all`](std::io::Write::write_all)
+ fn write_all_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570
+
+ let mut partial_buf = buf.offset(0)?;
+
+ while !partial_buf.is_empty() {
+ match self.write_volatile(&partial_buf) {
+ Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
+ continue
+ }
+ Ok(0) => {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ )))
+ }
+ Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?,
+ Err(err) => return Err(err),
+ }
+ }
+
+ Ok(())
+ }
+}
+
+// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`]
+// instead of providing blanket implementation for [`AsRawFd`] due to trait coherence limitations: A
+// blanket implementation would prevent us from providing implementations for `&mut [u8]` below, as
+ // "an upstream crate could implement `AsRawFd` for `&mut [u8]`".
+
+macro_rules! impl_read_write_volatile_for_raw_fd {
+ ($raw_fd_ty:ty) => {
+ impl ReadVolatile for $raw_fd_ty {
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ read_volatile_raw_fd(self, buf)
+ }
+ }
+
+ impl WriteVolatile for $raw_fd_ty {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ write_volatile_raw_fd(self, buf)
+ }
+ }
+ };
+}
+
+impl WriteVolatile for Stdout {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ write_volatile_raw_fd(self, buf)
+ }
+}
+
+impl_read_write_volatile_for_raw_fd!(std::fs::File);
+impl_read_write_volatile_for_raw_fd!(std::os::unix::net::UnixStream);
+impl_read_write_volatile_for_raw_fd!(std::os::fd::OwnedFd);
+impl_read_write_volatile_for_raw_fd!(std::os::fd::BorrowedFd<'_>);
+
+/// Tries to do a single `read` syscall on the provided file descriptor, storing the data read in
+/// the given [`VolatileSlice`].
+///
+/// Returns the numbers of bytes read.
+fn read_volatile_raw_fd<Fd: AsRawFd>(
+ raw_fd: &mut Fd,
+ buf: &mut VolatileSlice<impl BitmapSlice>,
+) -> Result<usize, VolatileMemoryError> {
+ let fd = raw_fd.as_raw_fd();
+ let guard = buf.ptr_guard_mut();
+
+ let dst = guard.as_ptr().cast::<libc::c_void>();
+
+ // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `dst` is
+ // valid for writes of length `buf.len()` by the invariants upheld by the constructor
+ // of `VolatileSlice`.
+ let bytes_read = unsafe { libc::read(fd, dst, buf.len()) };
+
+ if bytes_read < 0 {
+ // We don't know if a partial read might have happened, so mark everything as dirty
+ buf.bitmap().mark_dirty(0, buf.len());
+
+ Err(VolatileMemoryError::IOError(std::io::Error::last_os_error()))
+ } else {
+ let bytes_read = bytes_read.try_into().unwrap();
+ buf.bitmap().mark_dirty(0, bytes_read);
+ Ok(bytes_read)
+ }
+}
+
+/// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the
+/// data stored in the given [`VolatileSlice`].
+///
+/// Returns the numbers of bytes written.
+fn write_volatile_raw_fd<Fd: AsRawFd>(
+ raw_fd: &mut Fd,
+ buf: &VolatileSlice<impl BitmapSlice>,
+) -> Result<usize, VolatileMemoryError> {
+ let fd = raw_fd.as_raw_fd();
+ let guard = buf.ptr_guard();
+
+ let src = guard.as_ptr().cast::<libc::c_void>();
+
+ // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `src` is
+ // valid for reads of length `buf.len()` by the invariants upheld by the constructor
+ // of `VolatileSlice`.
+ let bytes_written = unsafe { libc::write(fd, src, buf.len()) };
+
+ if bytes_written < 0 {
+ Err(VolatileMemoryError::IOError(std::io::Error::last_os_error()))
+ } else {
+ Ok(bytes_written.try_into().unwrap())
+ }
+}
+
+impl WriteVolatile for &mut [u8] {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let total = buf.len().min(self.len());
+ let src = buf.subslice(0, total)?;
+
+ // SAFETY:
+ // We check above that `src` is contiguously allocated memory of length `total <= self.len()`.
+ // Furthermore, both src and dst of the call to
+ // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
+ // since total is the minimum of lengths of the memory areas pointed to. The areas do not
+ // overlap, since `src` is inside guest memory, and `self` is a slice (no slices to guest
+ // memory are possible without violating rust's aliasing rules).
+ let written = unsafe { copy_from_volatile_slice(self.as_mut_ptr(), &src, total) };
+
+ // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335
+ *self = std::mem::take(self).split_at_mut(written).1;
+
+ Ok(written)
+ }
+
+ fn write_all_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382
+ if self.write_volatile(buf)? == buf.len() {
+ Ok(())
+ } else {
+ Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ )))
+ }
+ }
+}
+
+impl ReadVolatile for &[u8] {
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let total = buf.len().min(self.len());
+ let dst = buf.subslice(0, total)?;
+
+ // SAFETY:
+ // We check above that `dst` is contiguously allocated memory of length `total <= self.len()`.
+ // Furthermore, both src and dst of the call to copy_to_volatile_slice are valid for reads
+ // and writes respectively of length `total` since total is the minimum of lengths of the
+ // memory areas pointed to. The areas do not overlap, since `dst` is inside guest memory,
+ // and buf is a slice (no slices to guest memory are possible without violating rust's aliasing rules).
+ let read = unsafe { copy_to_volatile_slice(&dst, self.as_ptr(), total) };
+
+ // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310
+ *self = self.split_at(read).1;
+
+ Ok(read)
+ }
+
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302
+ if buf.len() > self.len() {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::UnexpectedEof,
+ "failed to fill whole buffer",
+ )));
+ }
+
+ self.read_volatile(buf).map(|_| ())
+ }
+}
+
+// WriteVolatile implementation for Vec<u8> is based upon the Write impl for Vec, which
+// defers to Vec::append_elements, after which the below functionality is modelled.
+impl WriteVolatile for Vec<u8> {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let count = buf.len();
+ self.reserve(count);
+ let len = self.len();
+
+ // SAFETY: Calling Vec::reserve() above guarantees that the backing storage of the Vec has
+ // length at least `len + count`. This means that self.as_mut_ptr().add(len) remains within
+ // the same allocated object, the offset does not exceed isize (as otherwise reserve would
+ // have panicked), and does not rely on address space wrapping around.
+ // In particular, the entire `count` bytes after `self.as_mut_ptr().add(len)` are
+ // contiguously allocated and valid for writes.
+ // Lastly, `copy_from_volatile_slice` correctly initializes `copied_len` additional bytes
+ // in the Vec's backing storage, and we assert this to be equal to `count`. Additionally,
+ // `len + count` is at most the reserved capacity of the vector. Thus the call to `set_len`
+ // is safe.
+ unsafe {
+ let copied_len = copy_from_volatile_slice(self.as_mut_ptr().add(len), buf, count);
+
+ assert_eq!(copied_len, count);
+ self.set_len(len + count);
+ }
+ Ok(count)
+ }
+}
+
+// ReadVolatile and WriteVolatile implementations for Cursor<T> is modelled after the standard
+// library's implementation (modulo having to inline `Cursor::remaining_slice`, as that's nightly only)
+impl<T> ReadVolatile for Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let inner = self.get_ref().as_ref();
+ let len = self.position().min(inner.len() as u64);
+ let n = ReadVolatile::read_volatile(&mut &inner[(len as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(n)
+ }
+
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ let inner = self.get_ref().as_ref();
+ let n = buf.len();
+ let len = self.position().min(inner.len() as u64);
+ ReadVolatile::read_exact_volatile(&mut &inner[(len as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(())
+ }
+}
+
+impl WriteVolatile for Cursor<&mut [u8]> {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let pos = self.position().min(self.get_ref().len() as u64);
+ let n = WriteVolatile::write_volatile(&mut &mut self.get_mut()[(pos as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(n)
+ }
+
+ // no write_all provided in standard library, since our default for write_all is based on the
+ // standard library's write_all, omitting it here as well will correctly mimic stdlib behavior.
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::io::{ReadVolatile, WriteVolatile};
+ use crate::{VolatileMemoryError, VolatileSlice};
+ use std::io::{Cursor, ErrorKind, Read, Seek, Write};
+ use vmm_sys_util::tempfile::TempFile;
+
+ // ---- Test ReadVolatile for &[u8] ----
+ fn read_4_bytes_to_5_byte_memory(source: Vec<u8>, expected_output: [u8; 5]) {
+ // Test read_volatile for &[u8] works
+ let mut memory = vec![0u8; 5];
+
+ assert_eq!(
+ (&source[..])
+ .read_volatile(&mut VolatileSlice::from(&mut memory[..4]))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_output);
+
+ // Test read_exact_volatile for &[u8] works
+ let mut memory = vec![0u8; 5];
+ let result = (&source[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4]));
+
+ // read_exact fails if there are not enough bytes in input to completely fill
+ // memory[..4]
+ if source.len() < 4 {
+ match result.unwrap_err() {
+ VolatileMemoryError::IOError(ioe) => {
+ assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof)
+ }
+ err => panic!("{:?}", err),
+ }
+ assert_eq!(memory, vec![0u8; 5]);
+ } else {
+ result.unwrap();
+ assert_eq!(&memory, &expected_output);
+ }
+ }
+
+ // ---- Test ReadVolatile for File ----
+ fn read_4_bytes_from_file(source: Vec<u8>, expected_output: [u8; 5]) {
+ let mut temp_file = TempFile::new().unwrap().into_file();
+ temp_file.write_all(source.as_ref()).unwrap();
+ temp_file.rewind().unwrap();
+
+ // Test read_volatile for File works
+ let mut memory = vec![0u8; 5];
+
+ assert_eq!(
+ temp_file
+ .read_volatile(&mut VolatileSlice::from(&mut memory[..4]))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_output);
+
+ temp_file.rewind().unwrap();
+
+ // Test read_exact_volatile for File works
+ let mut memory = vec![0u8; 5];
+
+ let read_exact_result =
+ temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4]));
+
+ if source.len() < 4 {
+ read_exact_result.unwrap_err();
+ } else {
+ read_exact_result.unwrap();
+ }
+ assert_eq!(&memory, &expected_output);
+ }
+
+ #[test]
+ fn test_read_volatile() {
+ let test_cases = [
+ (vec![1u8, 2], [1u8, 2, 0, 0, 0]),
+ (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]),
+ // ensure we don't have a buffer overrun
+ (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]),
+ ];
+
+ for (input, output) in test_cases {
+ read_4_bytes_to_5_byte_memory(input.clone(), output);
+ read_4_bytes_from_file(input, output);
+ }
+ }
+
+ // ---- Test WriteVolatile for &mut [u8] ----
+ fn write_4_bytes_to_5_byte_vec(mut source: Vec<u8>, expected_result: [u8; 5]) {
+ let mut memory = vec![0u8; 5];
+
+ // Test write_volatile for &mut [u8] works
+ assert_eq!(
+ (&mut memory[..4])
+ .write_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_result);
+
+ // Test write_all_volatile for &mut [u8] works
+ let mut memory = vec![0u8; 5];
+
+ let result =
+ (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(source.as_mut_slice()));
+
+ if source.len() > 4 {
+ match result.unwrap_err() {
+ VolatileMemoryError::IOError(ioe) => {
+ assert_eq!(ioe.kind(), ErrorKind::WriteZero)
+ }
+ err => panic!("{:?}", err),
+ }
+ // This quirky behavior of writing to the slice even in the case of failure is also
+ // exhibited by the stdlib
+ assert_eq!(&memory, &expected_result);
+ } else {
+ result.unwrap();
+ assert_eq!(&memory, &expected_result);
+ }
+ }
+
+ // ---- Test WriteVolatile for File works ----
+ fn write_5_bytes_to_file(mut source: Vec<u8>) {
+ // Test write_volatile for File works
+ let mut temp_file = TempFile::new().unwrap().into_file();
+
+ temp_file
+ .write_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap();
+ temp_file.rewind().unwrap();
+
+ let mut written = vec![0u8; source.len()];
+ temp_file.read_exact(written.as_mut_slice()).unwrap();
+
+ assert_eq!(source, written);
+ // check no excess bytes were written to the file
+ assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0);
+
+ // Test write_all_volatile for File works
+ let mut temp_file = TempFile::new().unwrap().into_file();
+
+ temp_file
+ .write_all_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap();
+ temp_file.rewind().unwrap();
+
+ let mut written = vec![0u8; source.len()];
+ temp_file.read_exact(written.as_mut_slice()).unwrap();
+
+ assert_eq!(source, written);
+ // check no excess bytes were written to the file
+ assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0);
+ }
+
+ #[test]
+ fn test_write_volatile() {
+ let test_cases = [
+ (vec![1u8, 2], [1u8, 2, 0, 0, 0]),
+ (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]),
+ // ensure we don't have a buffer overrun
+ (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]),
+ ];
+
+ for (input, output) in test_cases {
+ write_4_bytes_to_5_byte_vec(input.clone(), output);
+ write_5_bytes_to_file(input);
+ }
+ }
+
+ #[test]
+ fn test_read_volatile_for_cursor() {
+ let read_buffer = [1, 2, 3, 4, 5, 6, 7];
+ let mut output = vec![0u8; 5];
+
+ let mut cursor = Cursor::new(read_buffer);
+
+ // Read 4 bytes from cursor to volatile slice (amount read limited by volatile slice length)
+ assert_eq!(
+ cursor
+ .read_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap(),
+ 4
+ );
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+
+ // Read next 3 bytes from cursor to volatile slice (amount read limited by length of remaining data in cursor)
+ assert_eq!(
+ cursor
+ .read_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap(),
+ 3
+ );
+ assert_eq!(output, vec![5, 6, 7, 4, 0]);
+
+ cursor.set_position(0);
+ // Same as first test above, but with read_exact
+ cursor
+ .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap();
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+
+ // Same as above, but with read_exact. Should fail now, because we cannot fill a 4 byte buffer
+ // with what's remaining in the cursor (3 bytes). Output should remain unchanged.
+ assert!(cursor
+ .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .is_err());
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+ }
+
+ #[test]
+ fn test_write_volatile_for_cursor() {
+ let mut write_buffer = vec![0u8; 7];
+ let mut input = [1, 2, 3, 4];
+
+ let mut cursor = Cursor::new(write_buffer.as_mut_slice());
+
+ // Write 4 bytes from volatile slice to cursor (amount written limited by volatile slice length)
+ assert_eq!(
+ cursor
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 4
+ );
+ assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 0, 0, 0]);
+
+ // Write 3 bytes from volatile slice to cursor (amount written limited by remaining space in cursor)
+ assert_eq!(
+ cursor
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 3
+ );
+ assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 1, 2, 3]);
+ }
+
+ #[test]
+ fn test_write_volatile_for_vec() {
+ let mut write_buffer = Vec::new();
+ let mut input = [1, 2, 3, 4];
+
+ assert_eq!(
+ write_buffer
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 4
+ );
+
+ assert_eq!(&write_buffer, &input);
+ }
+}
diff --git a/crates/vm-memory/src/lib.rs b/crates/vm-memory/src/lib.rs
index b574dfa..6f87ce4 100644
--- a/crates/vm-memory/src/lib.rs
+++ b/crates/vm-memory/src/lib.rs
@@ -15,9 +15,14 @@
//! without knowing the implementation details of the VM memory provider. Thus hypervisor
//! components, such as boot loader, virtual device drivers, virtio backend drivers and vhost
//! drivers etc, could be shared and reused by multiple hypervisors.
+#![warn(clippy::doc_markdown)]
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![deny(clippy::doc_markdown)]
-#![deny(missing_docs)]
+// We only support 64bit. Fail build when attempting to build other targets
+#[cfg(not(target_pointer_width = "64"))]
+compile_error!("vm-memory only supports 64-bit targets!");
#[macro_use]
pub mod address;
@@ -45,6 +50,9 @@
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
};
+pub mod io;
+pub use io::{ReadVolatile, WriteVolatile};
+
#[cfg(all(feature = "backend-mmap", not(feature = "xen"), unix))]
mod mmap_unix;
@@ -56,6 +64,7 @@
#[cfg(feature = "backend-mmap")]
pub mod mmap;
+
#[cfg(feature = "backend-mmap")]
pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
#[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
diff --git a/crates/vm-memory/src/mmap.rs b/crates/vm-memory/src/mmap.rs
index 0a442e6..48d9a56 100644
--- a/crates/vm-memory/src/mmap.rs
+++ b/crates/vm-memory/src/mmap.rs
@@ -24,8 +24,7 @@
use crate::address::Address;
use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::{
- self, FileOffset, GuestAddress, GuestMemory, GuestMemoryIterator, GuestMemoryRegion,
- GuestUsize, MemoryRegionAddress,
+ self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress,
};
use crate::volatile_memory::{VolatileMemory, VolatileSlice};
use crate::{AtomicAccess, Bytes};
@@ -273,6 +272,7 @@
F: Read,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.read_from::<F>(maddr, src, count)
@@ -318,6 +318,7 @@
F: Read,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.read_exact_from::<F>(maddr, src, count)
@@ -363,6 +364,7 @@
F: Write,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.write_to::<F>(maddr, dst, count)
@@ -408,6 +410,7 @@
F: Write,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.write_all_to::<F>(maddr, dst, count)
@@ -508,7 +511,7 @@
/// Creates a container and allocates anonymous memory for guest memory regions.
///
- /// Valid memory regions are specified as a sequence of (Address, Size, Option<FileOffset>)
+ /// Valid memory regions are specified as a sequence of (Address, Size, [`Option<FileOffset>`])
/// tuples sorted by Address.
pub fn from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, Error>
where
@@ -609,27 +612,9 @@
}
}
-/// An iterator over the elements of `GuestMemoryMmap`.
-///
-/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
-pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionMmap<B>>>);
-
-impl<'a, B> Iterator for Iter<'a, B> {
- type Item = &'a GuestRegionMmap<B>;
- fn next(&mut self) -> Option<Self::Item> {
- self.0.next().map(AsRef::as_ref)
- }
-}
-
-impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionMmap<B>> for GuestMemoryMmap<B> {
- type Iter = Iter<'a, B>;
-}
-
impl<B: Bitmap + 'static> GuestMemory for GuestMemoryMmap<B> {
type R = GuestRegionMmap<B>;
- type I = Self;
-
fn num_regions(&self) -> usize {
self.regions.len()
}
@@ -644,8 +629,8 @@
index.map(|x| self.regions[x].as_ref())
}
- fn iter(&self) -> Iter<B> {
- Iter(self.regions.iter())
+ fn iter(&self) -> impl Iterator<Item = &Self::R> {
+ self.regions.iter().map(AsRef::as_ref)
}
}
@@ -946,7 +931,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.address_in_range(GuestAddress(0x200)));
assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
@@ -972,7 +957,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert_eq!(
guest_mem.check_address(GuestAddress(0x200)),
@@ -1004,7 +989,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
@@ -1032,7 +1017,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
@@ -1059,7 +1044,7 @@
)])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
@@ -1097,7 +1082,7 @@
])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let val1: u64 = 0xaa55_aa55_aa55_aa55;
let val2: u64 = 0x55aa_55aa_55aa_55aa;
@@ -1137,7 +1122,7 @@
)])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
@@ -1168,7 +1153,7 @@
)])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let addr = GuestAddress(0x1010);
let mut file = if cfg!(unix) {
@@ -1177,7 +1162,7 @@
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
gm.write_obj(!0u32, addr).unwrap();
- gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
+ gm.read_exact_volatile_from(addr, &mut file, mem::size_of::<u32>())
.unwrap();
let value: u32 = gm.read_obj(addr).unwrap();
if cfg!(unix) {
@@ -1186,8 +1171,8 @@
assert_eq!(value, 0x0090_5a4d);
}
- let mut sink = Vec::new();
- gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
+ let mut sink = vec![0; mem::size_of::<u32>()];
+ gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::<u32>())
.unwrap();
if cfg!(unix) {
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
@@ -1271,7 +1256,7 @@
])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
@@ -1484,7 +1469,7 @@
Some(GuestAddress(0xfff))
);
assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
- assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None);
+ assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);
assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
assert_eq!(
@@ -1513,7 +1498,7 @@
assert!(guest_mem.check_range(start_addr2, 0x800));
assert!(!guest_mem.check_range(start_addr2, 0x801));
assert!(!guest_mem.check_range(start_addr2, 0xc00));
- assert!(!guest_mem.check_range(start_addr1, std::usize::MAX));
+ assert!(!guest_mem.check_range(start_addr1, usize::MAX));
}
#[test]
diff --git a/crates/vm-memory/src/mmap_unix.rs b/crates/vm-memory/src/mmap_unix.rs
index c1d1adb..14ceb80 100644
--- a/crates/vm-memory/src/mmap_unix.rs
+++ b/crates/vm-memory/src/mmap_unix.rs
@@ -52,6 +52,7 @@
pub type Result<T> = result::Result<T, Error>;
/// A factory struct to build `MmapRegion` objects.
+#[derive(Debug)]
pub struct MmapRegionBuilder<B = ()> {
size: usize,
prot: i32,
@@ -445,6 +446,7 @@
use super::*;
use std::io::Write;
+ use std::num::NonZeroUsize;
use std::slice;
use std::sync::Arc;
use vmm_sys_util::tempfile::TempFile;
@@ -453,13 +455,14 @@
type MmapRegion = super::MmapRegion<()>;
- // Adding a helper method to extract the errno within an Error::Mmap(e), or return a
- // distinctive value when the error is represented by another variant.
impl Error {
+ /// Helper method to extract the errno within an
+ /// `Error::Mmap(e)`. Returns `i32::MIN` if `self` is any
+ /// other variant.
pub fn raw_os_error(&self) -> i32 {
match self {
Error::Mmap(e) => e.raw_os_error().unwrap(),
- _ => std::i32::MIN,
+ _ => i32::MIN,
}
}
}
@@ -550,7 +553,7 @@
// Offset + size will overflow.
let r = MmapRegion::build(
- Some(FileOffset::from_arc(a.clone(), std::u64::MAX)),
+ Some(FileOffset::from_arc(a.clone(), u64::MAX)),
size,
prot,
flags,
@@ -598,7 +601,7 @@
assert!(r.owned());
let region_size = 0x10_0000;
- let bitmap = AtomicBitmap::new(region_size, 0x1000);
+ let bitmap = AtomicBitmap::new(region_size, unsafe { NonZeroUsize::new_unchecked(0x1000) });
let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap)
.with_hugetlbfs(true)
.with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE);
diff --git a/crates/vm-memory/src/mmap_xen.rs b/crates/vm-memory/src/mmap_xen.rs
index b641311..31d08e2 100644
--- a/crates/vm-memory/src/mmap_xen.rs
+++ b/crates/vm-memory/src/mmap_xen.rs
@@ -432,6 +432,7 @@
// Bit mask for the vhost-user xen mmap message.
bitflags! {
/// Flags for the Xen mmap message.
+ #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MmapXenFlags: u32 {
/// Standard Unix memory mapping.
const UNIX = 0x0;
@@ -1035,7 +1036,7 @@
fn raw_os_error(&self) -> i32 {
match self {
Error::Mmap(e) => e.raw_os_error().unwrap(),
- _ => std::i32::MIN,
+ _ => i32::MIN,
}
}
}
@@ -1062,6 +1063,7 @@
}
impl MmapRegion {
+ /// Create an `MmapRegion` with specified `size` at `GuestAddress(0)`
pub fn new(size: usize) -> Result<Self> {
let range = MmapRange::new_unix(size, None, GuestAddress(0));
Self::from_range(range)
diff --git a/crates/vm-memory/src/volatile_memory.rs b/crates/vm-memory/src/volatile_memory.rs
index 76e41bb..30e1038 100644
--- a/crates/vm-memory/src/volatile_memory.rs
+++ b/crates/vm-memory/src/volatile_memory.rs
@@ -18,10 +18,10 @@
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always been done with a volatile read or write.
-//! The First rule is because having references of any kind to memory considered volatile would
-//! violate pointer aliasing. The second is because unvolatile accesses are inherently undefined if
-//! done concurrently without synchronization. With volatile access we know that the compiler has
-//! not reordered or elided the access.
+//! The First rule is because having references of any kind to memory considered volatile would
+//! violate pointer aliasing. The second is because unvolatile accesses are inherently undefined if
+//! done concurrently without synchronization. With volatile access we know that the compiler has
+//! not reordered or elided the access.
use std::cmp::min;
use std::io::{self, Read, Write};
@@ -31,7 +31,6 @@
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::sync::atomic::Ordering;
-use std::usize;
use crate::atomic_integer::AtomicInteger;
use crate::bitmap::{Bitmap, BitmapSlice, BS};
@@ -43,6 +42,7 @@
#[cfg(not(feature = "xen"))]
type MmapInfo = std::marker::PhantomData<()>;
+use crate::io::{ReadVolatile, WriteVolatile};
use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
/// `VolatileMemory` related errors.
@@ -301,6 +301,7 @@
struct Packed<T>(T);
/// A guard to perform mapping and protect unmapping of the memory.
+#[derive(Debug)]
pub struct PtrGuard {
addr: *mut u8,
len: usize,
@@ -346,6 +347,7 @@
}
/// A mutable guard to perform mapping and protect unmapping of the memory.
+#[derive(Debug)]
pub struct PtrGuardMut(PtrGuard);
#[allow(clippy::len_without_is_empty)]
@@ -682,7 +684,7 @@
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 4);
/// ```
- fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
+ fn write(&self, mut buf: &[u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
@@ -691,18 +693,10 @@
return Err(Error::OutOfBounds { addr });
}
- let total = buf.len().min(self.len() - addr);
- let dst = self.subslice(addr, total)?;
-
- // SAFETY:
- // We check above that `addr` is a valid offset within this volatile slice, and by
- // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
- // memory of length self.len(). Furthermore, both src and dst of the call to
- // copy_to_volatile_slice are valid for reads and writes respectively of length `total`
- // since total is the minimum of lengths of the memory areas pointed to. The areas do not
- // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
- // memory are possible without violating rust's aliasing rules).
- Ok(unsafe { copy_to_volatile_slice(&dst, buf.as_ptr(), total) })
+ // NOTE: the duality of read <-> write here is correct. This is because we translate a call
+ // "volatile_slice.write(buf)" (e.g. "write to volatile_slice from buf") into
+ // "buf.read_volatile(volatile_slice)" (e.g. read from buf into volatile_slice)
+ buf.read_volatile(&mut self.offset(addr)?)
}
/// # Examples
@@ -719,7 +713,7 @@
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 14);
/// ```
- fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
+ fn read(&self, mut buf: &mut [u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
@@ -728,18 +722,11 @@
return Err(Error::OutOfBounds { addr });
}
- let total = buf.len().min(self.len() - addr);
- let src = self.subslice(addr, total)?;
-
- // SAFETY:
- // We check above that `addr` is a valid offset within this volatile slice, and by
- // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
- // memory of length self.len(). Furthermore, both src and dst of the call to
- // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
- // since total is the minimum of lengths of the memory areas pointed to. The areas do not
- // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
- // memory are possible without violating rust's aliasing rules).
- unsafe { Ok(copy_from_volatile_slice(buf.as_mut_ptr(), &src, total)) }
+ // NOTE: The duality of read <-> write here is correct. This is because we translate a call
+ // volatile_slice.read(buf) (e.g. read from volatile_slice into buf) into
+ // "buf.write_volatile(volatile_slice)" (e.g. write into buf from volatile_slice)
+ // Both express data transfer from volatile_slice to buf.
+ buf.write_volatile(&self.offset(addr)?)
}
/// # Examples
@@ -1512,7 +1499,7 @@
addr & (!addr + 1)
}
-mod copy_slice_impl {
+pub(crate) mod copy_slice_impl {
use super::*;
// SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
@@ -1610,7 +1597,7 @@
///
/// SAFETY: `slice` and `dst` must be point to a contiguously allocated memory region of at
/// least length `total`. The regions must not overlap.
- pub(super) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
+ pub(crate) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
dst: *mut u8,
slice: &VolatileSlice<'_, B>,
total: usize,
@@ -1625,7 +1612,7 @@
///
/// SAFETY: `slice` and `src` must be point to a contiguously allocated memory region of at
/// least length `total`. The regions must not overlap.
- pub(super) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
+ pub(crate) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
slice: &VolatileSlice<'_, B>,
src: *const u8,
total: usize,
@@ -1647,7 +1634,6 @@
use std::alloc::Layout;
use std::fs::File;
- use std::io::Cursor;
use std::mem::size_of_val;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -1655,6 +1641,7 @@
use std::thread::spawn;
use matches::assert_matches;
+ use std::num::NonZeroUsize;
use vmm_sys_util::tempfile::TempFile;
use crate::bitmap::tests::{
@@ -1662,6 +1649,8 @@
};
use crate::bitmap::{AtomicBitmap, RefSlice};
+ const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(0x1000) };
+
#[test]
fn test_display_error() {
assert_eq!(
@@ -1899,8 +1888,8 @@
assert!(slice.subslice(101, 0).is_err());
assert!(slice.subslice(101, 1).is_err());
- assert!(slice.subslice(std::usize::MAX, 2).is_err());
- assert!(slice.subslice(2, std::usize::MAX).is_err());
+ assert!(slice.subslice(usize::MAX, 2).is_err());
+ assert!(slice.subslice(2, usize::MAX).is_err());
let maybe_offset_slice = slice.subslice(10, 80);
assert!(maybe_offset_slice.is_ok());
@@ -2008,14 +1997,13 @@
#[test]
fn slice_overflow_error() {
- use std::usize::MAX;
let mut backing = vec![0u8];
let a = VolatileSlice::from(backing.as_mut_slice());
- let res = a.get_slice(MAX, 1).unwrap_err();
+ let res = a.get_slice(usize::MAX, 1).unwrap_err();
assert_matches!(
res,
Error::Overflow {
- base: MAX,
+ base: usize::MAX,
offset: 1,
}
);
@@ -2032,14 +2020,13 @@
#[test]
fn ref_overflow_error() {
- use std::usize::MAX;
let mut backing = vec![0u8];
let a = VolatileSlice::from(backing.as_mut_slice());
- let res = a.get_ref::<u8>(MAX).unwrap_err();
+ let res = a.get_ref::<u8>(usize::MAX).unwrap_err();
assert_matches!(
res,
Error::Overflow {
- base: MAX,
+ base: usize::MAX,
offset: 1,
}
);
@@ -2114,11 +2101,11 @@
let a = VolatileSlice::from(backing.as_mut_slice());
let s = a.as_volatile_slice();
assert!(s.write_obj(55u16, 4).is_err());
- assert!(s.write_obj(55u16, core::usize::MAX).is_err());
+ assert!(s.write_obj(55u16, usize::MAX).is_err());
assert!(s.write_obj(55u16, 2).is_ok());
assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
assert!(s.read_obj::<u16>(4).is_err());
- assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
+ assert!(s.read_obj::<u16>(usize::MAX).is_err());
}
#[test]
@@ -2132,16 +2119,15 @@
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
- assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
- assert!(s
- .read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
- .is_err());
- assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
+ assert!(file
+ .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_ok());
let mut f = TempFile::new().unwrap().into_file();
- assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
- format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
+ assert!(f
+ .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_err());
let value = s.read_obj::<u32>(1).unwrap();
if cfg!(unix) {
@@ -2150,13 +2136,12 @@
assert_eq!(value, 0x0090_5a4d);
}
- let mut sink = Vec::new();
- assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
- assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
- assert!(s
- .write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
- .is_err());
- format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
+ let mut sink = vec![0; size_of::<u32>()];
+ assert!(sink
+ .as_mut_slice()
+ .write_all_volatile(&s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_ok());
+
if cfg!(unix) {
assert_eq!(sink, vec![0; size_of::<u32>()]);
} else {
@@ -2190,16 +2175,15 @@
}
unsafe impl ByteValued for BytesToRead {}
let cursor_size = 20;
- let mut image = Cursor::new(vec![1u8; cursor_size]);
+ let image = vec![1u8; cursor_size];
- // Trying to read more bytes than we have available in the cursor should
- // make the read_from function return maximum cursor size (i.e. 20).
+ // Trying to read more bytes than we have space for in image
+ // make the read_from function return maximum vec size (i.e. 20).
let mut bytes_to_read = BytesToRead::default();
- let size_of_bytes = size_of_val(&bytes_to_read);
assert_eq!(
- bytes_to_read
- .as_bytes()
- .read_from(0, &mut image, size_of_bytes)
+ image
+ .as_slice()
+ .read_volatile(&mut bytes_to_read.as_bytes())
.unwrap(),
cursor_size
);
@@ -2314,14 +2298,13 @@
let val = 123u64;
let dirty_offset = 0x1000;
let dirty_len = size_of_val(&val);
- let page_size = 0x1000;
let len = 0x10000;
let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
// Invoke the `Bytes` test helper function.
{
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
test_bytes(
@@ -2337,18 +2320,18 @@
// Invoke the `VolatileMemory` test helper function.
{
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
test_volatile_memory(&slice);
}
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
- let bitmap2 = AtomicBitmap::new(len, page_size);
+ let bitmap2 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
- let bitmap3 = AtomicBitmap::new(len, page_size);
+ let bitmap3 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
@@ -2404,9 +2387,8 @@
fn test_volatile_ref_dirty_tracking() {
let val = 123u64;
let mut buf = vec![val];
- let page_size = 0x1000;
- let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(size_of_val(&val), DEFAULT_PAGE_SIZE);
let vref = unsafe {
VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
};
@@ -2416,8 +2398,11 @@
assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
}
- fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
- where
+ fn test_volatile_array_ref_copy_from_tracking<T>(
+ buf: &mut [T],
+ index: usize,
+ page_size: NonZeroUsize,
+ ) where
T: ByteValued + From<u8>,
{
let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
@@ -2444,14 +2429,13 @@
let dirty_len = size_of_val(&val);
let index = 0x1000;
let dirty_offset = dirty_len * index;
- let page_size = 0x1000;
let mut buf = vec![0u64; index + 1];
let mut byte_buf = vec![0u8; index + 1];
// Test `ref_at`.
{
- let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
@@ -2468,7 +2452,7 @@
// Test `store`.
{
- let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
@@ -2485,8 +2469,8 @@
}
// Test `copy_from` when size_of::<T>() == 1.
- test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
+ test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, DEFAULT_PAGE_SIZE);
// Test `copy_from` when size_of::<T>() > 1.
- test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
+ test_volatile_array_ref_copy_from_tracking(&mut buf, index, DEFAULT_PAGE_SIZE);
}
}
diff --git a/pseudo_crate/Cargo.lock b/pseudo_crate/Cargo.lock
index 1659e9e..8bb366a 100644
--- a/pseudo_crate/Cargo.lock
+++ b/pseudo_crate/Cargo.lock
@@ -468,7 +468,7 @@
"virtio-drivers",
"virtio-queue 0.12.0",
"virtio-vsock",
- "vm-memory 0.12.2",
+ "vm-memory 0.16.0",
"vmm-sys-util 0.12.1",
"vsock",
"vsprintf",
@@ -5936,6 +5936,17 @@
]
[[package]]
+name = "vm-memory"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2919f87420b6998a131eb7c78843890295e91a3f8f786ccc925c8d387b75121"
+dependencies = [
+ "libc",
+ "thiserror 1.0.49",
+ "winapi",
+]
+
+[[package]]
name = "vmm-sys-util"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/pseudo_crate/Cargo.toml b/pseudo_crate/Cargo.toml
index e740f9b..a445cbc 100644
--- a/pseudo_crate/Cargo.toml
+++ b/pseudo_crate/Cargo.toml
@@ -377,7 +377,7 @@
virtio-drivers = "=0.7.5"
virtio-queue = "=0.12.0"
virtio-vsock = "=0.6.0"
-vm-memory = "=0.12.2"
+vm-memory = "=0.16.0"
vmm-sys-util = "=0.12.1"
vsock = "=0.5.0"
vsprintf = "=2.0.0"